| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable ⌀) |
|---|---|---|---|---|
poryfly/scikit-learn
|
refs/heads/master
|
sklearn/externals/joblib/memory.py
|
194
|
"""
A context object for caching a function's return value each time it
is called with the same input arguments.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
from __future__ import with_statement
import os
import shutil
import time
import pydoc
import re
import sys
try:
import cPickle as pickle
except ImportError:
import pickle
import functools
import traceback
import warnings
import inspect
import json
import weakref
import io
# Local imports
from . import hashing
from .func_inspect import get_func_code, get_func_name, filter_args
from .func_inspect import format_signature, format_call
from ._memory_helpers import open_py_source
from .logger import Logger, format_time, pformat
from . import numpy_pickle
from .disk import mkdirp, rm_subdirs
from ._compat import _basestring
FIRST_LINE_TEXT = "# first line:"
# TODO: The following object should have a data store object as a sub
# object, and the interface to persist and query should be separated in
# the data store.
#
# This would enable creating 'Memory' objects with a different logic for
# pickling that would simply span a MemorizedFunc with the same
# store (or do we want to copy it to avoid cross-talks?), for instance to
# implement HDF5 pickling.
# TODO: Same remark for the logger, and probably use the Python logging
# mechanism.
def extract_first_line(func_code):
""" Extract the first line information from the function code
text if available.
"""
if func_code.startswith(FIRST_LINE_TEXT):
func_code = func_code.split('\n')
first_line = int(func_code[0][len(FIRST_LINE_TEXT):])
func_code = '\n'.join(func_code[1:])
else:
first_line = -1
return func_code, first_line
class JobLibCollisionWarning(UserWarning):
""" Warn that there might be a collision between names of functions.
"""
def _get_func_fullname(func):
"""Compute the part of part associated with a function.
See code of_cache_key_to_dir() for details
"""
modules, funcname = get_func_name(func)
modules.append(funcname)
return os.path.join(*modules)
def _cache_key_to_dir(cachedir, func, argument_hash):
"""Compute directory associated with a given cache key.
func can be a function or a string as returned by _get_func_fullname().
"""
parts = [cachedir]
if isinstance(func, _basestring):
parts.append(func)
else:
parts.append(_get_func_fullname(func))
if argument_hash is not None:
parts.append(argument_hash)
return os.path.join(*parts)
def _load_output(output_dir, func_name, timestamp=None, metadata=None,
mmap_mode=None, verbose=0):
"""Load output of a computation."""
if verbose > 1:
signature = ""
try:
if metadata is not None:
args = ", ".join(['%s=%s' % (name, value)
for name, value
in metadata['input_args'].items()])
signature = "%s(%s)" % (os.path.basename(func_name),
args)
else:
signature = os.path.basename(func_name)
except KeyError:
pass
if timestamp is not None:
t = "% 16s" % format_time(time.time() - timestamp)
else:
t = ""
if verbose < 10:
print('[Memory]%s: Loading %s...' % (t, str(signature)))
else:
print('[Memory]%s: Loading %s from %s' % (
t, str(signature), output_dir))
filename = os.path.join(output_dir, 'output.pkl')
if not os.path.isfile(filename):
raise KeyError(
"Non-existing cache value (may have been cleared).\n"
"File %s does not exist" % filename)
return numpy_pickle.load(filename, mmap_mode=mmap_mode)
# An in-memory store to avoid looking at the disk-based function
# source code to check if a function definition has changed
_FUNCTION_HASHES = weakref.WeakKeyDictionary()
###############################################################################
# class `MemorizedResult`
###############################################################################
class MemorizedResult(Logger):
"""Object representing a cached value.
Attributes
----------
cachedir: string
path to root of joblib cache
func: function or string
function whose output is cached. The string case is intended only for
        instantiation based on the output of repr() on another instance.
(namely eval(repr(memorized_instance)) works).
argument_hash: string
hash of the function arguments
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
        The memmapping mode used when loading numpy arrays from the cache.
        See numpy.load for the meaning of the different values.
verbose: int
verbosity level (0 means no message)
timestamp, metadata: string
for internal use only
"""
def __init__(self, cachedir, func, argument_hash,
mmap_mode=None, verbose=0, timestamp=None, metadata=None):
Logger.__init__(self)
if isinstance(func, _basestring):
self.func = func
else:
self.func = _get_func_fullname(func)
self.argument_hash = argument_hash
self.cachedir = cachedir
self.mmap_mode = mmap_mode
self._output_dir = _cache_key_to_dir(cachedir, self.func,
argument_hash)
if metadata is not None:
self.metadata = metadata
else:
self.metadata = {}
# No error is relevant here.
try:
with open(os.path.join(self._output_dir, 'metadata.json'),
'rb') as f:
self.metadata = json.load(f)
except:
pass
self.duration = self.metadata.get('duration', None)
self.verbose = verbose
self.timestamp = timestamp
def get(self):
"""Read value from cache and return it."""
return _load_output(self._output_dir, _get_func_fullname(self.func),
timestamp=self.timestamp,
metadata=self.metadata, mmap_mode=self.mmap_mode,
verbose=self.verbose)
def clear(self):
"""Clear value from cache"""
shutil.rmtree(self._output_dir, ignore_errors=True)
def __repr__(self):
return ('{class_name}(cachedir="{cachedir}", func="{func}", '
'argument_hash="{argument_hash}")'.format(
class_name=self.__class__.__name__,
cachedir=self.cachedir,
func=self.func,
argument_hash=self.argument_hash
))
def __reduce__(self):
return (self.__class__, (self.cachedir, self.func, self.argument_hash),
{'mmap_mode': self.mmap_mode})
class NotMemorizedResult(object):
"""Class representing an arbitrary value.
This class is a replacement for MemorizedResult when there is no cache.
"""
__slots__ = ('value', 'valid')
def __init__(self, value):
self.value = value
self.valid = True
def get(self):
if self.valid:
return self.value
else:
raise KeyError("No value stored.")
def clear(self):
self.valid = False
self.value = None
def __repr__(self):
if self.valid:
return '{class_name}({value})'.format(
class_name=self.__class__.__name__,
value=pformat(self.value)
)
else:
return self.__class__.__name__ + ' with no value'
# __getstate__ and __setstate__ are required because of __slots__
def __getstate__(self):
return {"valid": self.valid, "value": self.value}
def __setstate__(self, state):
self.valid = state["valid"]
self.value = state["value"]
###############################################################################
# class `NotMemorizedFunc`
###############################################################################
class NotMemorizedFunc(object):
"""No-op object decorating a function.
This class replaces MemorizedFunc when there is no cache. It provides an
identical API but does not write anything on disk.
Attributes
----------
func: callable
Original undecorated function.
"""
    # Should be as light as possible (for speed)
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def call_and_shelve(self, *args, **kwargs):
return NotMemorizedResult(self.func(*args, **kwargs))
def __reduce__(self):
return (self.__class__, (self.func,))
def __repr__(self):
return '%s(func=%s)' % (
self.__class__.__name__,
self.func
)
def clear(self, warn=True):
# Argument "warn" is for compatibility with MemorizedFunc.clear
pass
###############################################################################
# class `MemorizedFunc`
###############################################################################
class MemorizedFunc(Logger):
""" Callable object decorating a function for caching its return value
each time it is called.
All values are cached on the filesystem, in a deep directory
structure. Methods are provided to inspect the cache or clean it.
Attributes
----------
func: callable
The original, undecorated, function.
cachedir: string
Path to the base cache directory of the memory context.
ignore: list or None
List of variable names to ignore when choosing whether to
recompute.
    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}
        The memmapping mode used when loading numpy arrays from the cache.
        See numpy.load for the meaning of the different values.
compress: boolean, or integer
Whether to zip the stored data on disk. If an integer is
given, it should be between 1 and 9, and sets the amount
of compression. Note that compressed arrays cannot be
read by memmapping.
verbose: int, optional
The verbosity flag, controls messages that are issued as
the function is evaluated.
"""
#-------------------------------------------------------------------------
# Public interface
#-------------------------------------------------------------------------
def __init__(self, func, cachedir, ignore=None, mmap_mode=None,
compress=False, verbose=1, timestamp=None):
"""
Parameters
----------
func: callable
The function to decorate
cachedir: string
The path of the base directory to use as a data store
ignore: list or None
List of variable names to ignore.
        mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
            The memmapping mode used when loading numpy arrays from the
            cache. See numpy.load for the meaning of the different values.
compress : boolean, or integer
Whether to zip the stored data on disk. If an integer is
given, it should be between 1 and 9, and sets the amount
of compression. Note that compressed arrays cannot be
read by memmapping.
verbose: int, optional
Verbosity flag, controls the debug messages that are issued
as functions are evaluated. The higher, the more verbose
timestamp: float, optional
The reference time from which times in tracing messages
are reported.
"""
Logger.__init__(self)
self.mmap_mode = mmap_mode
self.func = func
if ignore is None:
ignore = []
self.ignore = ignore
self._verbose = verbose
self.cachedir = cachedir
self.compress = compress
if compress and self.mmap_mode is not None:
warnings.warn('Compressed results cannot be memmapped',
stacklevel=2)
if timestamp is None:
timestamp = time.time()
self.timestamp = timestamp
mkdirp(self.cachedir)
try:
functools.update_wrapper(self, func)
except:
" Objects like ufunc don't like that "
if inspect.isfunction(func):
doc = pydoc.TextDoc().document(func)
# Remove blank line
doc = doc.replace('\n', '\n\n', 1)
# Strip backspace-overprints for compatibility with autodoc
doc = re.sub('\x08.', '', doc)
else:
# Pydoc does a poor job on other objects
doc = func.__doc__
self.__doc__ = 'Memoized version of %s' % doc
def _cached_call(self, args, kwargs):
"""Call wrapped function and cache result, or read cache if available.
This function returns the wrapped function output and some metadata.
Returns
-------
output: value or tuple
what is returned by wrapped function
argument_hash: string
hash of function arguments
metadata: dict
some metadata about wrapped function call (see _persist_input())
"""
# Compare the function code with the previous to see if the
# function code has changed
output_dir, argument_hash = self._get_output_dir(*args, **kwargs)
metadata = None
# FIXME: The statements below should be try/excepted
if not (self._check_previous_func_code(stacklevel=4) and
os.path.exists(output_dir)):
if self._verbose > 10:
_, name = get_func_name(self.func)
self.warn('Computing func %s, argument hash %s in '
'directory %s'
% (name, argument_hash, output_dir))
out, metadata = self.call(*args, **kwargs)
if self.mmap_mode is not None:
# Memmap the output at the first call to be consistent with
# later calls
out = _load_output(output_dir, _get_func_fullname(self.func),
timestamp=self.timestamp,
mmap_mode=self.mmap_mode,
verbose=self._verbose)
else:
try:
t0 = time.time()
out = _load_output(output_dir, _get_func_fullname(self.func),
timestamp=self.timestamp,
metadata=metadata, mmap_mode=self.mmap_mode,
verbose=self._verbose)
if self._verbose > 4:
t = time.time() - t0
_, name = get_func_name(self.func)
msg = '%s cache loaded - %s' % (name, format_time(t))
print(max(0, (80 - len(msg))) * '_' + msg)
except Exception:
# XXX: Should use an exception logger
self.warn('Exception while loading results for '
'(args=%s, kwargs=%s)\n %s' %
(args, kwargs, traceback.format_exc()))
shutil.rmtree(output_dir, ignore_errors=True)
out, metadata = self.call(*args, **kwargs)
argument_hash = None
return (out, argument_hash, metadata)
def call_and_shelve(self, *args, **kwargs):
"""Call wrapped function, cache result and return a reference.
        This method returns a reference to the cached result instead of the
        result itself. The reference object is small and picklable, so it can
        be sent or stored easily. Call .get() on the reference object to get
        the result.
Returns
-------
cached_result: MemorizedResult or NotMemorizedResult
reference to the value returned by the wrapped function. The
class "NotMemorizedResult" is used when there is no cache
activated (e.g. cachedir=None in Memory).
"""
_, argument_hash, metadata = self._cached_call(args, kwargs)
return MemorizedResult(self.cachedir, self.func, argument_hash,
metadata=metadata, verbose=self._verbose - 1,
timestamp=self.timestamp)
def __call__(self, *args, **kwargs):
return self._cached_call(args, kwargs)[0]
def __reduce__(self):
""" We don't store the timestamp when pickling, to avoid the hash
            depending on it.
In addition, when unpickling, we run the __init__
"""
return (self.__class__, (self.func, self.cachedir, self.ignore,
self.mmap_mode, self.compress, self._verbose))
def format_signature(self, *args, **kwargs):
warnings.warn("MemorizedFunc.format_signature will be removed in a "
"future version of joblib.", DeprecationWarning)
return format_signature(self.func, *args, **kwargs)
def format_call(self, *args, **kwargs):
warnings.warn("MemorizedFunc.format_call will be removed in a "
"future version of joblib.", DeprecationWarning)
return format_call(self.func, args, kwargs)
#-------------------------------------------------------------------------
# Private interface
#-------------------------------------------------------------------------
def _get_argument_hash(self, *args, **kwargs):
return hashing.hash(filter_args(self.func, self.ignore,
args, kwargs),
coerce_mmap=(self.mmap_mode is not None))
def _get_output_dir(self, *args, **kwargs):
""" Return the directory in which are persisted the result
of the function called with the given arguments.
"""
argument_hash = self._get_argument_hash(*args, **kwargs)
output_dir = os.path.join(self._get_func_dir(self.func),
argument_hash)
return output_dir, argument_hash
get_output_dir = _get_output_dir # backward compatibility
def _get_func_dir(self, mkdir=True):
""" Get the directory corresponding to the cache for the
function.
"""
func_dir = _cache_key_to_dir(self.cachedir, self.func, None)
if mkdir:
mkdirp(func_dir)
return func_dir
def _hash_func(self):
"""Hash a function to key the online cache"""
func_code_h = hash(getattr(self.func, '__code__', None))
return id(self.func), hash(self.func), func_code_h
def _write_func_code(self, filename, func_code, first_line):
""" Write the function code and the filename to a file.
"""
# We store the first line because the filename and the function
        # name are not always enough to identify a function: people
# sometimes have several functions named the same way in a
# file. This is bad practice, but joblib should be robust to bad
# practice.
func_code = u'%s %i\n%s' % (FIRST_LINE_TEXT, first_line, func_code)
with io.open(filename, 'w', encoding="UTF-8") as out:
out.write(func_code)
# Also store in the in-memory store of function hashes
is_named_callable = False
if sys.version_info[0] > 2:
is_named_callable = (hasattr(self.func, '__name__')
and self.func.__name__ != '<lambda>')
else:
is_named_callable = (hasattr(self.func, 'func_name')
and self.func.func_name != '<lambda>')
if is_named_callable:
# Don't do this for lambda functions or strange callable
# objects, as it ends up being too fragile
func_hash = self._hash_func()
try:
_FUNCTION_HASHES[self.func] = func_hash
except TypeError:
                # Some callables are not hashable
pass
def _check_previous_func_code(self, stacklevel=2):
"""
            stacklevel is the depth at which this function is called, to
issue useful warnings to the user.
"""
# First check if our function is in the in-memory store.
# Using the in-memory store not only makes things faster, but it
# also renders us robust to variations of the files when the
# in-memory version of the code does not vary
try:
if self.func in _FUNCTION_HASHES:
# We use as an identifier the id of the function and its
# hash. This is more likely to falsely change than have hash
# collisions, thus we are on the safe side.
func_hash = self._hash_func()
if func_hash == _FUNCTION_HASHES[self.func]:
return True
except TypeError:
# Some callables are not hashable
pass
# Here, we go through some effort to be robust to dynamically
# changing code and collision. We cannot inspect.getsource
# because it is not reliable when using IPython's magic "%run".
func_code, source_file, first_line = get_func_code(self.func)
func_dir = self._get_func_dir()
func_code_file = os.path.join(func_dir, 'func_code.py')
try:
with io.open(func_code_file, encoding="UTF-8") as infile:
old_func_code, old_first_line = \
extract_first_line(infile.read())
except IOError:
self._write_func_code(func_code_file, func_code, first_line)
return False
if old_func_code == func_code:
return True
# We have differing code, is this because we are referring to
# different functions, or because the function we are referring to has
# changed?
_, func_name = get_func_name(self.func, resolv_alias=False,
win_characters=False)
if old_first_line == first_line == -1 or func_name == '<lambda>':
if not first_line == -1:
func_description = '%s (%s:%i)' % (func_name,
source_file, first_line)
else:
func_description = func_name
warnings.warn(JobLibCollisionWarning(
"Cannot detect name collisions for function '%s'"
% func_description), stacklevel=stacklevel)
# Fetch the code at the old location and compare it. If it is the
        # same as the stored code, we have a collision: the code in the
# file has not changed, but the name we have is pointing to a new
# code block.
if not old_first_line == first_line and source_file is not None:
possible_collision = False
if os.path.exists(source_file):
_, func_name = get_func_name(self.func, resolv_alias=False)
num_lines = len(func_code.split('\n'))
with open_py_source(source_file) as f:
on_disk_func_code = f.readlines()[
old_first_line - 1:old_first_line - 1 + num_lines - 1]
on_disk_func_code = ''.join(on_disk_func_code)
possible_collision = (on_disk_func_code.rstrip()
== old_func_code.rstrip())
else:
possible_collision = source_file.startswith('<doctest ')
if possible_collision:
warnings.warn(JobLibCollisionWarning(
'Possible name collisions between functions '
"'%s' (%s:%i) and '%s' (%s:%i)" %
(func_name, source_file, old_first_line,
func_name, source_file, first_line)),
stacklevel=stacklevel)
# The function has changed, wipe the cache directory.
# XXX: Should be using warnings, and giving stacklevel
if self._verbose > 10:
_, func_name = get_func_name(self.func, resolv_alias=False)
self.warn("Function %s (stored in %s) has changed." %
(func_name, func_dir))
self.clear(warn=True)
return False
def clear(self, warn=True):
""" Empty the function's cache.
"""
func_dir = self._get_func_dir(mkdir=False)
if self._verbose > 0 and warn:
self.warn("Clearing cache %s" % func_dir)
if os.path.exists(func_dir):
shutil.rmtree(func_dir, ignore_errors=True)
mkdirp(func_dir)
func_code, _, first_line = get_func_code(self.func)
func_code_file = os.path.join(func_dir, 'func_code.py')
self._write_func_code(func_code_file, func_code, first_line)
def call(self, *args, **kwargs):
""" Force the execution of the function with the given arguments and
persist the output values.
"""
start_time = time.time()
output_dir, _ = self._get_output_dir(*args, **kwargs)
if self._verbose > 0:
print(format_call(self.func, args, kwargs))
output = self.func(*args, **kwargs)
self._persist_output(output, output_dir)
duration = time.time() - start_time
metadata = self._persist_input(output_dir, duration, args, kwargs)
if self._verbose > 0:
_, name = get_func_name(self.func)
msg = '%s - %s' % (name, format_time(duration))
print(max(0, (80 - len(msg))) * '_' + msg)
return output, metadata
# Make public
def _persist_output(self, output, dir):
""" Persist the given output tuple in the directory.
"""
try:
mkdirp(dir)
filename = os.path.join(dir, 'output.pkl')
numpy_pickle.dump(output, filename, compress=self.compress)
if self._verbose > 10:
print('Persisting in %s' % dir)
except OSError:
" Race condition in the creation of the directory "
def _persist_input(self, output_dir, duration, args, kwargs,
this_duration_limit=0.5):
""" Save a small summary of the call using json format in the
output directory.
output_dir: string
directory where to write metadata.
duration: float
time taken by hashing input arguments, calling the wrapped
function and persisting its output.
args, kwargs: list and dict
input arguments for wrapped function
this_duration_limit: float
Max execution time for this function before issuing a warning.
"""
start_time = time.time()
argument_dict = filter_args(self.func, self.ignore,
args, kwargs)
input_repr = dict((k, repr(v)) for k, v in argument_dict.items())
# This can fail due to race-conditions with multiple
# concurrent joblibs removing the file or the directory
metadata = {"duration": duration, "input_args": input_repr}
try:
mkdirp(output_dir)
with open(os.path.join(output_dir, 'metadata.json'), 'w') as f:
json.dump(metadata, f)
except:
pass
this_duration = time.time() - start_time
if this_duration > this_duration_limit:
# This persistence should be fast. It will not be if repr() takes
# time and its output is large, because json.dump will have to
# write a large file. This should not be an issue with numpy arrays
            # for which repr() always outputs a short representation, but can
# be with complex dictionaries. Fixing the problem should be a
# matter of replacing repr() above by something smarter.
warnings.warn("Persisting input arguments took %.2fs to run.\n"
"If this happens often in your code, it can cause "
"performance problems \n"
"(results will be correct in all cases). \n"
"The reason for this is probably some large input "
"arguments for a wrapped\n"
" function (e.g. large strings).\n"
"THIS IS A JOBLIB ISSUE. If you can, kindly provide "
"the joblib's team with an\n"
" example so that they can fix the problem."
% this_duration, stacklevel=5)
return metadata
def load_output(self, output_dir):
""" Read the results of a previous calculation from the directory
it was cached in.
"""
warnings.warn("MemorizedFunc.load_output is deprecated and will be "
"removed in a future version\n"
"of joblib. A MemorizedResult provides similar features",
DeprecationWarning)
# No metadata available here.
return _load_output(output_dir, _get_func_fullname(self.func),
timestamp=self.timestamp,
mmap_mode=self.mmap_mode, verbose=self._verbose)
# XXX: Need a method to check if results are available.
#-------------------------------------------------------------------------
# Private `object` interface
#-------------------------------------------------------------------------
def __repr__(self):
return '%s(func=%s, cachedir=%s)' % (
self.__class__.__name__,
self.func,
repr(self.cachedir),
)
###############################################################################
# class `Memory`
###############################################################################
class Memory(Logger):
""" A context object for caching a function's return value each time it
is called with the same input arguments.
All values are cached on the filesystem, in a deep directory
structure.
see :ref:`memory_reference`
"""
#-------------------------------------------------------------------------
# Public interface
#-------------------------------------------------------------------------
def __init__(self, cachedir, mmap_mode=None, compress=False, verbose=1):
"""
Parameters
----------
cachedir: string or None
The path of the base directory to use as a data store
or None. If None is given, no caching is done and
the Memory object is completely transparent.
        mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
            The memmapping mode used when loading numpy arrays from the
            cache. See numpy.load for the meaning of the different values.
compress: boolean, or integer
Whether to zip the stored data on disk. If an integer is
given, it should be between 1 and 9, and sets the amount
of compression. Note that compressed arrays cannot be
read by memmapping.
verbose: int, optional
Verbosity flag, controls the debug messages that are issued
as functions are evaluated.
"""
# XXX: Bad explanation of the None value of cachedir
Logger.__init__(self)
self._verbose = verbose
self.mmap_mode = mmap_mode
self.timestamp = time.time()
self.compress = compress
if compress and mmap_mode is not None:
warnings.warn('Compressed results cannot be memmapped',
stacklevel=2)
if cachedir is None:
self.cachedir = None
else:
self.cachedir = os.path.join(cachedir, 'joblib')
mkdirp(self.cachedir)
def cache(self, func=None, ignore=None, verbose=None,
mmap_mode=False):
""" Decorates the given function func to only compute its return
value for input arguments not cached on disk.
Parameters
----------
func: callable, optional
The function to be decorated
ignore: list of strings
            A list of argument names to ignore in the hashing
verbose: integer, optional
The verbosity mode of the function. By default that
of the memory object is used.
        mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
            The memmapping mode used when loading numpy arrays from the
            cache. See numpy.load for the meaning of the different values.
            By default that of the memory object is used.
Returns
-------
decorated_func: MemorizedFunc object
The returned object is a MemorizedFunc object, that is
callable (behaves like a function), but offers extra
methods for cache lookup and management. See the
documentation for :class:`joblib.memory.MemorizedFunc`.
"""
if func is None:
# Partial application, to be able to specify extra keyword
# arguments in decorators
return functools.partial(self.cache, ignore=ignore,
verbose=verbose, mmap_mode=mmap_mode)
if self.cachedir is None:
return NotMemorizedFunc(func)
if verbose is None:
verbose = self._verbose
if mmap_mode is False:
mmap_mode = self.mmap_mode
if isinstance(func, MemorizedFunc):
func = func.func
return MemorizedFunc(func, cachedir=self.cachedir,
mmap_mode=mmap_mode,
ignore=ignore,
compress=self.compress,
verbose=verbose,
timestamp=self.timestamp)
def clear(self, warn=True):
""" Erase the complete cache directory.
"""
if warn:
            self.warn('Completely flushing the cache')
rm_subdirs(self.cachedir)
def eval(self, func, *args, **kwargs):
""" Eval function func with arguments `*args` and `**kwargs`,
in the context of the memory.
This method works similarly to the builtin `apply`, except
that the function is called only if the cache is not
up to date.
"""
if self.cachedir is None:
return func(*args, **kwargs)
return self.cache(func)(*args, **kwargs)
#-------------------------------------------------------------------------
# Private `object` interface
#-------------------------------------------------------------------------
def __repr__(self):
return '%s(cachedir=%s)' % (
self.__class__.__name__,
repr(self.cachedir),
)
def __reduce__(self):
""" We don't store the timestamp when pickling, to avoid the hash
            depending on it.
In addition, when unpickling, we run the __init__
"""
# We need to remove 'joblib' from the end of cachedir
cachedir = self.cachedir[:-7] if self.cachedir is not None else None
return (self.__class__, (cachedir,
self.mmap_mode, self.compress, self._verbose))
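# --- Hedged usage sketch (illustrative, not part of the original module) ---
# The docstrings above describe how Memory caches a function's return value on
# disk, keyed by its input arguments. The sketch below exercises only the
# public API defined in this file (Memory.cache, MemorizedFunc.call_and_shelve,
# MemorizedResult.get); the temporary cache directory is an assumption for the
# example, not something the module mandates.
if __name__ == '__main__':
    import tempfile

    example_cachedir = tempfile.mkdtemp(prefix='joblib_memory_example_')
    memory = Memory(cachedir=example_cachedir, verbose=0)

    @memory.cache
    def square(x):
        print('computing square(%r)' % x)
        return x * x

    print(square(3))                     # computed, then persisted on disk
    print(square(3))                     # served from the cache, no recompute
    shelved = square.call_and_shelve(4)  # small, picklable MemorizedResult
    print(shelved.get())                 # value loaded back from the cache
    memory.clear(warn=False)             # wipe the example cache directory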
|
rajsadho/django
|
refs/heads/master
|
tests/postgres_tests/__init__.py
|
315
|
import unittest
from django.db import connection
from django.test import TestCase
@unittest.skipUnless(connection.vendor == 'postgresql', "PostgreSQL specific tests")
class PostgreSQLTestCase(TestCase):
pass
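# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Test modules in this package subclass PostgreSQLTestCase so the whole class
# is skipped on non-PostgreSQL backends. The model and field below are
# placeholders for whatever the concrete test module defines.
#
#   from .models import IntegerArrayModel
#
#   class ArrayFieldRoundtripTests(PostgreSQLTestCase):
#       def test_roundtrip(self):
#           obj = IntegerArrayModel.objects.create(field=[1, 2, 3])
#           self.assertEqual(obj.field, [1, 2, 3])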
|
LSSTDESC/LSSTDarkMatter
|
refs/heads/master
|
streamsim/instcat.py
|
1
|
#!/usr/bin/env python
"""
Convert a simulated set of stars into an instcat file for imsim.
"""
__author__ = "Alex Drlica-Wagner"
import os
import copy
from collections import OrderedDict as odict
import numpy as np
FILTER = 'g'
MAG = 'MAG_%s'%FILTER.upper()
MAGLIMS =odict([('u',23),('g',25),('r',25),('i',24.5),('z',24),('y',23)])
FILTERS = odict([('u',0),('g',1),('r',2),('i',3),('z',4),('y',5)])
DEFAULTS = odict([
('rightascension', 31.1133844),
('declination', -10.0970060),
('mjd', 59797.2854090),
('altitude', 43.6990272),
('azimuth', 73.7707957),
('filter', 2),
('rotskypos', 69.0922930),
('rottelpos', 0.000),
('dist2moon', 145.1095257),
('moonalt', -11.1383568),
('moondec', -18.7702120),
('moonphase', 59.6288830),
('moonra', 230.9832941),
('nsnap', 2),
('obshistid', 1),
('seed', 1),
('seeing', 0.7613760),
('rawSeeing', 0.7613760),
('FWHMgeom', 1.210734),
('FWHMeff', 1.409653),
('sunalt', -59.1098785),
('vistime', 33.0000000),
])
POINT = "object %(ID)i %(RA)12.6f %(DEC)12.6f %({})12.6f starSED/phoSimMLT/lte031-4.5-1.0a+0.4.BT-Settl.spec.gz 0 0 0 0 0 0 point none CCM 0.0 3.1".format(MAG)
SERSIC = "object %(ID)i %(RA)12.6f %(DEC)12.6f %({})12.6f starSED/phoSimMLT/lte031-4.5-1.0a+0.4.BT-Settl.spec.gz 0 0 0 0 0 0 sersic2d %(EXT)12.6f %(EXT)12.6f 0 0.5 none CCM 0.0 3.1".format(MAG)
def set_defaults(kwargs,defaults):
for k,v in defaults.items():
kwargs.setdefault(k,v)
return kwargs
def read_instcat(filename,**kwargs):
""" Read an instcat file into a numpy.recarray """
defaults =dict(skip_header=23,usecols=[1,2,3,4],
dtype=[('ID',int),('RA',float),('DEC',float),('MAG',float)])
set_defaults(kwargs,defaults)
return np.genfromtxt(filename,**kwargs)
class InstCatWriter(object):
def write_toppart(self, filename, dwarf, force=True):
if isinstance(filename,basestring):
if os.path.exists(filename) and not force:
msg = "File %s already exists"%filename
raise IOError(msg)
out = open(filename,'w')
elif isinstance(filename,file):
out = filename
else:
msg ="Unrecongnized file object"
raise IOError(msg)
out.write(self.comment_string())
out.write(self.header_string(dwarf))
return out
def write_middlepart(self, filename, dwarf, data, idx, force=True):
if isinstance(filename,basestring):
if os.path.exists(filename) and not force:
msg = "File %s already exists"%filename
raise IOError(msg)
out = open(filename,'a')
elif isinstance(filename,file):
out = filename
else:
msg ="Unrecongnized file object"
raise IOError(msg)
out.write(self.unresolved_string(dwarf,data, idx))
return out
def write_endpart(self, filename, dwarf, data, force=True):
if isinstance(filename,basestring):
if os.path.exists(filename) and not force:
msg = "File %s already exists"%filename
raise IOError(msg)
out = open(filename,'a')
elif isinstance(filename,file):
out = filename
else:
msg ="Unrecongnized file object"
raise IOError(msg)
out.write(self.resolved_string(dwarf,data))
out.write('\n')
return out
def comment_string(self,comment=None):
if comment is None: comment = "instcat file written by dsphsim"
return "# %s \n"%comment
def header_string(self, dwarf,**kwargs):
header = self.parse_dwarf(dwarf,**kwargs)
return "\n".join(["%s %s"%(k,v) for k,v in header.items()])+'\n'
def parse_dwarf(self, dwarf, **kwargs):
header = set_defaults(odict(),DEFAULTS)
header.update(kwargs)
header['rightascension'] = dwarf.lon
header['declination'] = dwarf.lat
header['filter'] = FILTERS[FILTER]
return header
def resolved_string(self, dwarf, data, **kwargs):
sel = (data[MAG] <= MAGLIMS[FILTER])
return '\n'.join(np.char.mod([POINT],data[sel]))
def unresolved_string(self, dwarf, data, idx, **kwargs):
sel = (data[MAG] > MAGLIMS[FILTER])
params = dict(ID=idx,
RA=dwarf.lon,
DEC=dwarf.lat,
EXT=dwarf.extension*3600 / 1.68, # arcsec
)
params[MAG] = self.cumulative_magnitude(data[sel])
return SERSIC%(params) + '\n'
def cumulative_magnitude(self,data):
return -2.5 * np.log10(np.sum(10**(data[MAG]/-2.5)))
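# --- Hedged usage sketch (illustrative, not part of the original module) ---
# Shows the intended write_toppart/write_middlepart/write_endpart sequence
# based only on the methods above (Python 2 style, matching the module's use
# of basestring/file). The 'Dwarf' namedtuple and the fake star catalog are
# placeholders; real inputs would come from the simulation this module serves.
if __name__ == '__main__':
    from collections import namedtuple

    Dwarf = namedtuple('Dwarf', ['lon', 'lat', 'extension'])
    dwarf = Dwarf(lon=31.11, lat=-10.10, extension=0.05)  # extension in deg

    # Fake catalog with the columns that read_instcat() would provide.
    data = np.zeros(5, dtype=[('ID', int), ('RA', float),
                              ('DEC', float), (MAG, float)])
    data['ID'] = np.arange(5)
    data['RA'] = dwarf.lon
    data['DEC'] = dwarf.lat
    data[MAG] = [22.0, 24.0, 26.0, 27.0, 28.0]  # bright (resolved) and faint

    writer = InstCatWriter()
    out = writer.write_toppart('example_instcat.txt', dwarf)
    out = writer.write_middlepart(out, dwarf, data, idx=len(data))
    out = writer.write_endpart(out, dwarf, data)
    out.close()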
|
uwosh/UWOshSuccess
|
refs/heads/master
|
__init__.py
|
1
|
# There are three ways to inject custom code here:
#
# - To set global configuration variables, create a file AppConfig.py.
# This will be imported in config.py, which in turn is imported in
# each generated class and in this file.
# - To perform custom initialisation after types have been registered,
# use the protected code section at the bottom of initialize().
import logging
logger = logging.getLogger('UWOshSuccess')
logger.debug('Installing Product')
import os
import os.path
from Globals import package_home
import Products.CMFPlone.interfaces
from Products.Archetypes import listTypes
from Products.Archetypes.atapi import *
from Products.Archetypes.utils import capitalize
from Products.CMFCore import DirectoryView
from Products.CMFCore import permissions as cmfpermissions
from Products.CMFCore import utils as cmfutils
from Products.CMFPlone.utils import ToolInit
from config import *
DirectoryView.registerDirectory('skins', product_globals)
def initialize(context):
"""initialize product (called by zope)"""
import content
# Initialize portal content
all_content_types, all_constructors, all_ftis = process_types(
listTypes(PROJECTNAME),
PROJECTNAME)
cmfutils.ContentInit(
PROJECTNAME + ' Content',
content_types = all_content_types,
permission = DEFAULT_ADD_CONTENT_PERMISSION,
extra_constructors = all_constructors,
fti = all_ftis,
).initialize(context)
    # Give it some extra permissions so they can be controlled on a per-class basis
for i in range(0,len(all_content_types)):
klassname=all_content_types[i].__name__
if not klassname in ADD_CONTENT_PERMISSIONS:
continue
context.registerClass(meta_type = all_ftis[i]['meta_type'],
constructors= (all_constructors[i],),
permission = ADD_CONTENT_PERMISSIONS[klassname])
|
cdubz/babybuddy
|
refs/heads/master
|
api/permissions.py
|
1
|
# -*- coding: utf-8 -*-
from rest_framework.permissions import DjangoModelPermissions
class BabyBuddyDjangoModelPermissions(DjangoModelPermissions):
perms_map = {
'GET': ['%(app_label)s.view_%(model_name)s'],
'OPTIONS': ['%(app_label)s.add_%(model_name)s'],
'HEAD': [],
'POST': ['%(app_label)s.add_%(model_name)s'],
# 'PUT': ['%(app_label)s.change_%(model_name)s'],
# 'PATCH': ['%(app_label)s.change_%(model_name)s'],
# 'DELETE': ['%(app_label)s.delete_%(model_name)s'],
}
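# --- Hedged usage sketch (illustrative, not part of the original module) ---
# A DRF permission class like this is typically enabled either globally via
# settings or per-view; the dotted path and the viewset below assume this
# module lives at api/permissions.py and are placeholders, not project facts.
#
#   # settings.py
#   REST_FRAMEWORK = {
#       'DEFAULT_PERMISSION_CLASSES': [
#           'api.permissions.BabyBuddyDjangoModelPermissions',
#       ],
#   }
#
#   # or per-view
#   from rest_framework import viewsets
#   from api.permissions import BabyBuddyDjangoModelPermissions
#
#   class ExampleViewSet(viewsets.ModelViewSet):
#       permission_classes = [BabyBuddyDjangoModelPermissions]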
|
promptworks/keystone
|
refs/heads/master
|
keystone/common/wsgi.py
|
2
|
# Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import copy
import itertools
import urllib
from oslo_config import cfg
import oslo_i18n
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import strutils
import routes.middleware
import six
import webob.dec
import webob.exc
from keystone.common import dependency
from keystone.common import json_home
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LI
from keystone.i18n import _LW
from keystone.models import token_model
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Environment variable used to pass the request context
CONTEXT_ENV = 'openstack.context'
# Environment variable used to pass the request params
PARAMS_ENV = 'openstack.params'
def validate_token_bind(context, token_ref):
bind_mode = CONF.token.enforce_token_bind
if bind_mode == 'disabled':
return
if not isinstance(token_ref, token_model.KeystoneToken):
raise exception.UnexpectedError(_('token reference must be a '
'KeystoneToken type, got: %s') %
type(token_ref))
bind = token_ref.bind
# permissive and strict modes don't require there to be a bind
permissive = bind_mode in ('permissive', 'strict')
# get the named mode if bind_mode is not one of the known
name = None if permissive or bind_mode == 'required' else bind_mode
if not bind:
if permissive:
# no bind provided and none required
return
else:
LOG.info(_LI("No bind information present in token"))
raise exception.Unauthorized()
if name and name not in bind:
LOG.info(_LI("Named bind mode %s not in bind information"), name)
raise exception.Unauthorized()
for bind_type, identifier in six.iteritems(bind):
if bind_type == 'kerberos':
if not (context['environment'].get('AUTH_TYPE', '').lower()
== 'negotiate'):
LOG.info(_LI("Kerberos credentials required and not present"))
raise exception.Unauthorized()
if not context['environment'].get('REMOTE_USER') == identifier:
LOG.info(_LI("Kerberos credentials do not match "
"those in bind"))
raise exception.Unauthorized()
LOG.info(_LI("Kerberos bind authentication successful"))
elif bind_mode == 'permissive':
LOG.debug(("Ignoring unknown bind for permissive mode: "
"{%(bind_type)s: %(identifier)s}"),
{'bind_type': bind_type, 'identifier': identifier})
else:
LOG.info(_LI("Couldn't verify unknown bind: "
"{%(bind_type)s: %(identifier)s}"),
{'bind_type': bind_type, 'identifier': identifier})
raise exception.Unauthorized()
def best_match_language(req):
"""Determines the best available locale from the Accept-Language
HTTP header passed in the request.
"""
if not req.accept_language:
return None
return req.accept_language.best_match(
oslo_i18n.get_available_languages('keystone'))
class BaseApplication(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = keystone.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import keystone.fancy_api
keystone.fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
return cls(**local_config)
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify()
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(explanation='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, or or or)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError('You must implement __call__')
@dependency.requires('assignment_api', 'policy_api', 'token_provider_api')
class Application(BaseApplication):
@webob.dec.wsgify()
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict.pop('action')
del arg_dict['controller']
# allow middleware up the stack to provide context, params and headers.
context = req.environ.get(CONTEXT_ENV, {})
context['query_string'] = dict(six.iteritems(req.params))
context['headers'] = dict(six.iteritems(req.headers))
context['path'] = req.environ['PATH_INFO']
scheme = (None if not CONF.secure_proxy_ssl_header
else req.environ.get(CONF.secure_proxy_ssl_header))
if scheme:
# NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used
# before the proxy removed it ('https' usually). So if
# the webob.Request instance is modified in order to use this
# scheme instead of the one defined by API, the call to
# webob.Request.relative_url() will return a URL with the correct
# scheme.
req.environ['wsgi.url_scheme'] = scheme
context['host_url'] = req.host_url
params = req.environ.get(PARAMS_ENV, {})
# authentication and authorization attributes are set as environment
        # values by the container and processed by the pipeline. The complete
        # set is not yet known.
context['environment'] = req.environ
context['accept_header'] = req.accept
req.environ = None
params.update(arg_dict)
context.setdefault('is_admin', False)
# TODO(termie): do some basic normalization on methods
method = getattr(self, action)
# NOTE(morganfainberg): use the request method to normalize the
# response code between GET and HEAD requests. The HTTP status should
# be the same.
req_method = req.environ['REQUEST_METHOD'].upper()
LOG.info('%(req_method)s %(path)s?%(params)s', {
'req_method': req_method,
'path': context['path'],
'params': urllib.urlencode(req.params)})
params = self._normalize_dict(params)
try:
result = method(context, **params)
except exception.Unauthorized as e:
LOG.warning(
_LW("Authorization failed. %(exception)s from "
"%(remote_addr)s"),
{'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
return render_exception(e, context=context,
user_locale=best_match_language(req))
except exception.Error as e:
LOG.warning(six.text_type(e))
return render_exception(e, context=context,
user_locale=best_match_language(req))
except TypeError as e:
LOG.exception(six.text_type(e))
return render_exception(exception.ValidationError(e),
context=context,
user_locale=best_match_language(req))
except Exception as e:
LOG.exception(six.text_type(e))
return render_exception(exception.UnexpectedError(exception=e),
context=context,
user_locale=best_match_language(req))
if result is None:
return render_response(status=(204, 'No Content'))
elif isinstance(result, six.string_types):
return result
elif isinstance(result, webob.Response):
return result
elif isinstance(result, webob.exc.WSGIHTTPException):
return result
response_code = self._get_response_code(req)
return render_response(body=result, status=response_code,
method=req_method)
def _get_response_code(self, req):
req_method = req.environ['REQUEST_METHOD']
controller = importutils.import_class('keystone.common.controller')
code = None
if isinstance(self, controller.V3Controller) and req_method == 'POST':
code = (201, 'Created')
return code
def _normalize_arg(self, arg):
return arg.replace(':', '_').replace('-', '_')
def _normalize_dict(self, d):
return {self._normalize_arg(k): v for (k, v) in six.iteritems(d)}
def assert_admin(self, context):
if not context['is_admin']:
try:
user_token_ref = token_model.KeystoneToken(
token_id=context['token_id'],
token_data=self.token_provider_api.validate_token(
context['token_id']))
except exception.TokenNotFound as e:
raise exception.Unauthorized(e)
validate_token_bind(context, user_token_ref)
creds = copy.deepcopy(user_token_ref.metadata)
try:
creds['user_id'] = user_token_ref.user_id
except exception.UnexpectedError:
LOG.debug('Invalid user')
raise exception.Unauthorized()
if user_token_ref.project_scoped:
creds['tenant_id'] = user_token_ref.project_id
else:
LOG.debug('Invalid tenant')
raise exception.Unauthorized()
creds['roles'] = user_token_ref.role_names
# Accept either is_admin or the admin role
self.policy_api.enforce(creds, 'admin_required', {})
def _attribute_is_empty(self, ref, attribute):
"""Returns true if the attribute in the given ref (which is a
dict) is empty or None.
"""
return ref.get(attribute) is None or ref.get(attribute) == ''
def _require_attribute(self, ref, attribute):
"""Ensures the reference contains the specified attribute.
Raise a ValidationError if the given attribute is not present
"""
if self._attribute_is_empty(ref, attribute):
msg = _('%s field is required and cannot be empty') % attribute
raise exception.ValidationError(message=msg)
def _require_attributes(self, ref, attrs):
"""Ensures the reference contains the specified attributes.
Raise a ValidationError if any of the given attributes is not present
"""
missing_attrs = [attribute for attribute in attrs
if self._attribute_is_empty(ref, attribute)]
if missing_attrs:
msg = _('%s field(s) cannot be empty') % ', '.join(missing_attrs)
raise exception.ValidationError(message=msg)
def _get_trust_id_for_request(self, context):
"""Get the trust_id for a call.
Retrieve the trust_id from the token
Returns None if token is not trust scoped
"""
if ('token_id' not in context or
context.get('token_id') == CONF.admin_token):
LOG.debug(('will not lookup trust as the request auth token is '
'either absent or it is the system admin token'))
return None
try:
token_data = self.token_provider_api.validate_token(
context['token_id'])
except exception.TokenNotFound:
LOG.warning(_LW('Invalid token in _get_trust_id_for_request'))
raise exception.Unauthorized()
token_ref = token_model.KeystoneToken(token_id=context['token_id'],
token_data=token_data)
return token_ref.trust_id
@classmethod
def base_url(cls, context, endpoint_type):
url = CONF['%s_endpoint' % endpoint_type]
if url:
substitutions = dict(
itertools.chain(six.iteritems(CONF),
six.iteritems(CONF.eventlet_server)))
url = url % substitutions
else:
# NOTE(jamielennox): if url is not set via the config file we
# should set it relative to the url that the user used to get here
# so as not to mess with version discovery. This is not perfect.
# host_url omits the path prefix, but there isn't another good
# solution that will work for all urls.
url = context['host_url']
return url.rstrip('/')
class Middleware(Application):
"""Base WSGI middleware.
These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = keystone.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import keystone.analytics
keystone.analytics.Analytics(app, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app, **local_config)
return _factory
def __init__(self, application):
super(Middleware, self).__init__()
self.application = application
def process_request(self, request):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, request, response):
"""Do whatever you'd like to the response, based on the request."""
return response
@webob.dec.wsgify()
def __call__(self, request):
try:
response = self.process_request(request)
if response:
return response
response = request.get_response(self.application)
return self.process_response(request, response)
except exception.Error as e:
LOG.warning(six.text_type(e))
return render_exception(e, request=request,
user_locale=best_match_language(request))
except TypeError as e:
LOG.exception(six.text_type(e))
return render_exception(exception.ValidationError(e),
request=request,
user_locale=best_match_language(request))
except Exception as e:
LOG.exception(six.text_type(e))
return render_exception(exception.UnexpectedError(exception=e),
request=request,
user_locale=best_match_language(request))
class Debug(Middleware):
"""Helper class for debugging a WSGI application.
Can be inserted into any WSGI application chain to get information
about the request and response.
"""
@webob.dec.wsgify()
def __call__(self, req):
if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug):
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
for key, value in req.environ.items():
LOG.debug('%s = %s', key,
strutils.mask_password(value))
LOG.debug('')
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
for line in req.body_file:
LOG.debug('%s', strutils.mask_password(line))
LOG.debug('')
resp = req.get_response(self.application)
if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug):
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
for (key, value) in six.iteritems(resp.headers):
LOG.debug('%s = %s', key, value)
LOG.debug('')
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Iterator that prints the contents of a wrapper string."""
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
for part in app_iter:
LOG.debug(part)
yield part
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify()
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify()
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
msg = _('The resource could not be found.')
return render_exception(exception.NotFound(msg),
request=req,
user_locale=best_match_language(req))
app = match['controller']
return app
class ComposingRouter(Router):
def __init__(self, mapper=None, routers=None):
if mapper is None:
mapper = routes.Mapper()
if routers is None:
routers = []
for router in routers:
router.add_routes(mapper)
super(ComposingRouter, self).__init__(mapper)
class ComposableRouter(Router):
"""Router that supports use by ComposingRouter."""
def __init__(self, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self.add_routes(mapper)
super(ComposableRouter, self).__init__(mapper)
def add_routes(self, mapper):
"""Add routes to given mapper."""
pass
class ExtensionRouter(Router):
"""A router that allows extensions to supplement or overwrite routes.
Expects to be subclassed.
"""
def __init__(self, application, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self.application = application
self.add_routes(mapper)
mapper.connect('{path_info:.*}', controller=self.application)
super(ExtensionRouter, self).__init__(mapper)
def add_routes(self, mapper):
pass
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = keystone.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import keystone.analytics
keystone.analytics.Analytics(app, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app, **local_config)
return _factory
class RoutersBase(object):
"""Base class for Routers."""
def __init__(self):
self.v3_resources = []
def append_v3_routers(self, mapper, routers):
"""Append v3 routers.
Subclasses should override this method to map its routes.
Use self._add_resource() to map routes for a resource.
"""
def _add_resource(self, mapper, controller, path, rel,
get_action=None, head_action=None, get_head_action=None,
put_action=None, post_action=None, patch_action=None,
delete_action=None, get_post_action=None,
path_vars=None, status=None):
if get_head_action:
getattr(controller, get_head_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_head_action,
conditions=dict(method=['GET', 'HEAD']))
if get_action:
getattr(controller, get_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_action,
conditions=dict(method=['GET']))
if head_action:
getattr(controller, head_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=head_action,
conditions=dict(method=['HEAD']))
if put_action:
getattr(controller, put_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=put_action,
conditions=dict(method=['PUT']))
if post_action:
getattr(controller, post_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=post_action,
conditions=dict(method=['POST']))
if patch_action:
getattr(controller, patch_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=patch_action,
conditions=dict(method=['PATCH']))
if delete_action:
getattr(controller, delete_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=delete_action,
conditions=dict(method=['DELETE']))
if get_post_action:
getattr(controller, get_post_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_post_action,
conditions=dict(method=['GET', 'POST']))
resource_data = dict()
if path_vars:
resource_data['href-template'] = path
resource_data['href-vars'] = path_vars
else:
resource_data['href'] = path
if status:
if not json_home.Status.is_supported(status):
raise exception.Error(message=_(
'Unexpected status requested for JSON Home response, %s') %
status)
resource_data.setdefault('hints', {})
resource_data['hints']['status'] = status
self.v3_resources.append((rel, resource_data))
class V3ExtensionRouter(ExtensionRouter, RoutersBase):
"""Base class for V3 extension router."""
def __init__(self, application, mapper=None):
self.v3_resources = list()
super(V3ExtensionRouter, self).__init__(application, mapper)
def _update_version_response(self, response_data):
response_data['resources'].update(self.v3_resources)
@webob.dec.wsgify()
def __call__(self, request):
if request.path_info != '/':
# Not a request for version info so forward to super.
return super(V3ExtensionRouter, self).__call__(request)
response = request.get_response(self.application)
if response.status_code != 200:
# The request failed, so don't update the response.
return response
if response.headers['Content-Type'] != 'application/json-home':
# Not a request for JSON Home document, so don't update the
# response.
return response
response_data = jsonutils.loads(response.body)
self._update_version_response(response_data)
response.body = jsonutils.dumps(response_data,
cls=utils.SmarterEncoder)
return response
def render_response(body=None, status=None, headers=None, method=None):
"""Forms a WSGI response."""
if headers is None:
headers = []
else:
headers = list(headers)
headers.append(('Vary', 'X-Auth-Token'))
if body is None:
body = ''
status = status or (204, 'No Content')
else:
content_types = [v for h, v in headers if h == 'Content-Type']
if content_types:
content_type = content_types[0]
else:
content_type = None
JSON_ENCODE_CONTENT_TYPES = ('application/json',
'application/json-home',)
if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
if content_type is None:
headers.append(('Content-Type', 'application/json'))
status = status or (200, 'OK')
resp = webob.Response(body=body,
status='%s %s' % status,
headerlist=headers)
if method == 'HEAD':
# NOTE(morganfainberg): HEAD requests should return the same status
# as a GET request and same headers (including content-type and
# content-length). The webob.Response object automatically changes
# content-length (and other headers) if the body is set to b''. Capture
# all headers and reset them on the response object after clearing the
# body. The body can only be set to a binary-type (not TextType or
# NoneType), so b'' is used here and should be compatible with
# both py2x and py3x.
stored_headers = resp.headers.copy()
resp.body = b''
for header, value in six.iteritems(stored_headers):
resp.headers[header] = value
return resp
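# Illustrative sketch (not part of the original module): render_response()
# JSON-encodes dict bodies and fills in default status and headers.  The
# payload below is made up.
def _example_render_response():
    ok = render_response(body={'hello': 'world'})         # 200 OK, JSON body
    empty = render_response(status=(204, 'No Content'))   # empty body
    return ok, empty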
def render_exception(error, context=None, request=None, user_locale=None):
"""Forms a WSGI response based on the current error."""
error_message = error.args[0]
message = oslo_i18n.translate(error_message, desired_locale=user_locale)
if message is error_message:
# translate() didn't do anything because it wasn't a Message,
# convert to a string.
message = six.text_type(message)
body = {'error': {
'code': error.code,
'title': error.title,
'message': message,
}}
headers = []
if isinstance(error, exception.AuthPluginException):
body['error']['identity'] = error.authentication
elif isinstance(error, exception.Unauthorized):
url = CONF.public_endpoint
if not url:
if request:
context = {'host_url': request.host_url}
if context:
url = Application.base_url(context, 'public')
else:
url = 'http://localhost:%d' % CONF.eventlet_server.public_port
else:
substitutions = dict(
itertools.chain(six.iteritems(CONF),
six.iteritems(CONF.eventlet_server)))
url = url % substitutions
headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
return render_response(status=(error.code, error.title),
body=body,
headers=headers)
|
Dricus/dwarf-fortress-launcher
|
refs/heads/master
|
src/launcher.py
|
1
|
from app.api import set_qt_api_version # @UnusedImport
if __name__ == '__main__':
    # Starts the Qt application.
import sys
from app.app import App
from ui.mainwindow import MainWindow
from PyQt4 import QtGui
qt_app = QtGui.QApplication(sys.argv)
app = App()
window = MainWindow(app)
window.show()
qt_app.exec_()
|
matthiasdiener/spack
|
refs/heads/develop
|
lib/spack/spack/util/compression.py
|
4
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import re
import os
from itertools import product
from spack.util.executable import which
# Supported archive extensions.
PRE_EXTS = ["tar", "TAR"]
EXTS = ["gz", "bz2", "xz", "Z", "zip", "tgz"]
# Add PRE_EXTS and EXTS last so that .tar.gz is matched *before* .tar or .gz
ALLOWED_ARCHIVE_TYPES = [".".join(l) for l in product(
PRE_EXTS, EXTS)] + PRE_EXTS + EXTS
def allowed_archive(path):
return any(path.endswith(t) for t in ALLOWED_ARCHIVE_TYPES)
def decompressor_for(path, extension=None):
"""Get the appropriate decompressor for a path."""
if ((extension and re.match(r'\.?zip$', extension)) or
path.endswith('.zip')):
unzip = which('unzip', required=True)
unzip.add_default_arg('-q')
return unzip
if extension and re.match(r'gz', extension):
gunzip = which('gunzip', required=True)
return gunzip
tar = which('tar', required=True)
tar.add_default_arg('-xf')
return tar
def strip_extension(path):
"""Get the part of a path that does not include its compressed
type extension."""
    for t in ALLOWED_ARCHIVE_TYPES:
        suffix = r'\.%s$' % t
        if re.search(suffix, path):
            return re.sub(suffix, "", path)
return path
def extension(path):
"""Get the archive extension for a path."""
if path is None:
raise ValueError("Can't call extension() on None")
# Strip sourceforge suffix.
if re.search(r'((?:sourceforge.net|sf.net)/.*)/download$', path):
path = os.path.dirname(path)
for t in ALLOWED_ARCHIVE_TYPES:
suffix = r'\.%s$' % t
if re.search(suffix, path):
return t
return None
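# Illustrative usage sketch (not part of the original module): how the
# helpers above behave on a typical archive name and on a SourceForge
# download URL (the names are made up).
if __name__ == '__main__':
    assert allowed_archive('foo-1.2.3.tar.gz')
    assert extension('foo-1.2.3.tar.gz') == 'tar.gz'
    assert strip_extension('foo-1.2.3.tar.gz') == 'foo-1.2.3'
    assert extension('https://sf.net/p/foo/foo-1.2.3.zip/download') == 'zip'
    # decompressor_for('foo-1.2.3.zip') would return an `unzip -q` executable,
    # while decompressor_for('foo-1.2.3.tar.gz') falls through to `tar -xf`.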
|
carljm/django
|
refs/heads/master
|
tests/admin_custom_urls/urls.py
|
405
|
from django.conf.urls import url
from .models import site
urlpatterns = [
url(r'^admin/', site.urls),
]
|
miguelparaiso/OdooAccessible
|
refs/heads/master
|
addons/website_mail_group/controllers/main.py
|
306
|
# -*- coding: utf-8 -*-
import datetime
from dateutil import relativedelta
from openerp import tools, SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.website.models.website import slug
from openerp.addons.web.http import request
class MailGroup(http.Controller):
_thread_per_page = 20
_replies_per_page = 10
def _get_archives(self, group_id):
MailMessage = request.registry['mail.message']
groups = MailMessage.read_group(
request.cr, request.uid, [('model', '=', 'mail.group'), ('res_id', '=', group_id)], ['subject', 'date'],
groupby="date", orderby="date desc", context=request.context)
for group in groups:
begin_date = datetime.datetime.strptime(group['__domain'][0][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
end_date = datetime.datetime.strptime(group['__domain'][1][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
group['date_begin'] = '%s' % datetime.date.strftime(begin_date, tools.DEFAULT_SERVER_DATE_FORMAT)
group['date_end'] = '%s' % datetime.date.strftime(end_date, tools.DEFAULT_SERVER_DATE_FORMAT)
return groups
@http.route("/groups", type='http', auth="public", website=True)
def view(self, **post):
cr, uid, context = request.cr, request.uid, request.context
group_obj = request.registry.get('mail.group')
mail_message_obj = request.registry.get('mail.message')
group_ids = group_obj.search(cr, uid, [('alias_id', '!=', False), ('alias_id.alias_name', '!=', False)], context=context)
groups = group_obj.browse(cr, uid, group_ids, context)
# compute statistics
month_date = datetime.datetime.today() - relativedelta.relativedelta(months=1)
group_data = dict()
for group in groups:
group_data[group.id] = {
'monthly_message_nbr': mail_message_obj.search(
cr, SUPERUSER_ID,
[('model', '=', 'mail.group'), ('res_id', '=', group.id), ('date', '>=', month_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT))],
count=True, context=context)}
values = {'groups': groups, 'group_data': group_data}
return request.website.render('website_mail_group.mail_groups', values)
@http.route(["/groups/subscription/"], type='json', auth="user")
def subscription(self, group_id=0, action=False, **post):
""" TDE FIXME: seems dead code """
cr, uid, context = request.cr, request.uid, request.context
group_obj = request.registry.get('mail.group')
if action:
group_obj.message_subscribe_users(cr, uid, [group_id], context=context)
else:
group_obj.message_unsubscribe_users(cr, uid, [group_id], context=context)
return []
@http.route([
"/groups/<model('mail.group'):group>",
"/groups/<model('mail.group'):group>/page/<int:page>"
], type='http', auth="public", website=True)
def thread_headers(self, group, page=1, mode='thread', date_begin=None, date_end=None, **post):
cr, uid, context = request.cr, request.uid, request.context
thread_obj = request.registry.get('mail.message')
domain = [('model', '=', 'mail.group'), ('res_id', '=', group.id)]
if mode == 'thread':
domain += [('parent_id', '=', False)]
if date_begin and date_end:
domain += [('date', '>=', date_begin), ('date', '<=', date_end)]
thread_count = thread_obj.search_count(cr, uid, domain, context=context)
pager = request.website.pager(
url='/groups/%s' % slug(group),
total=thread_count,
page=page,
step=self._thread_per_page,
url_args={'mode': mode, 'date_begin': date_begin or '', 'date_end': date_end or ''},
)
thread_ids = thread_obj.search(cr, uid, domain, limit=self._thread_per_page, offset=pager['offset'])
messages = thread_obj.browse(cr, uid, thread_ids, context)
values = {
'messages': messages,
'group': group,
'pager': pager,
'mode': mode,
'archives': self._get_archives(group.id),
'date_begin': date_begin,
'date_end': date_end,
'replies_per_page': self._replies_per_page,
}
return request.website.render('website_mail_group.group_messages', values)
@http.route([
'''/groups/<model('mail.group'):group>/<model('mail.message', "[('model','=','mail.group'), ('res_id','=',group[0])]"):message>''',
], type='http', auth="public", website=True)
def thread_discussion(self, group, message, mode='thread', date_begin=None, date_end=None, **post):
cr, uid, context = request.cr, request.uid, request.context
Message = request.registry['mail.message']
if mode == 'thread':
base_domain = [('model', '=', 'mail.group'), ('res_id', '=', group.id), ('parent_id', '=', message.parent_id and message.parent_id.id or False)]
else:
base_domain = [('model', '=', 'mail.group'), ('res_id', '=', group.id)]
next_message = None
next_message_ids = Message.search(cr, uid, base_domain + [('date', '<', message.date)], order="date DESC", limit=1, context=context)
if next_message_ids:
next_message = Message.browse(cr, uid, next_message_ids[0], context=context)
prev_message = None
prev_message_ids = Message.search(cr, uid, base_domain + [('date', '>', message.date)], order="date ASC", limit=1, context=context)
if prev_message_ids:
prev_message = Message.browse(cr, uid, prev_message_ids[0], context=context)
values = {
'message': message,
'group': group,
'mode': mode,
'archives': self._get_archives(group.id),
'date_begin': date_begin,
'date_end': date_end,
'replies_per_page': self._replies_per_page,
'next_message': next_message,
'prev_message': prev_message,
}
return request.website.render('website_mail_group.group_message', values)
@http.route(
'''/groups/<model('mail.group'):group>/<model('mail.message', "[('model','=','mail.group'), ('res_id','=',group[0])]"):message>/get_replies''',
type='json', auth="public", methods=['POST'], website=True)
def render_messages(self, group, message, **post):
last_displayed_id = post.get('last_displayed_id')
if not last_displayed_id:
return False
Message = request.registry['mail.message']
replies_domain = [('id', '<', int(last_displayed_id)), ('parent_id', '=', message.id)]
msg_ids = Message.search(request.cr, request.uid, replies_domain, limit=self._replies_per_page, context=request.context)
msg_count = Message.search(request.cr, request.uid, replies_domain, count=True, context=request.context)
messages = Message.browse(request.cr, request.uid, msg_ids, context=request.context)
values = {
'group': group,
'thread_header': message,
'messages': messages,
'msg_more_count': msg_count - self._replies_per_page,
'replies_per_page': self._replies_per_page,
}
return request.registry['ir.ui.view'].render(request.cr, request.uid, 'website_mail_group.messages_short', values, engine='ir.qweb', context=request.context)
@http.route("/groups/<model('mail.group'):group>/get_alias_info", type='json', auth='public', website=True)
def get_alias_info(self, group, **post):
return {
'alias_name': group.alias_id and group.alias_id.alias_name and group.alias_id.alias_domain and '%s@%s' % (group.alias_id.alias_name, group.alias_id.alias_domain) or False
}
|
apporc/nova
|
refs/heads/master
|
nova/objects/pci_device.py
|
2
|
# Copyright 2013 Intel Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova import db
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
LOG = logging.getLogger(__name__)
def compare_pci_device_attributes(obj_a, obj_b):
pci_ignore_fields = base.NovaPersistentObject.fields.keys()
for name in obj_a.obj_fields:
if name in pci_ignore_fields:
continue
is_set_a = obj_a.obj_attr_is_set(name)
is_set_b = obj_b.obj_attr_is_set(name)
if is_set_a != is_set_b:
return False
if is_set_a:
if getattr(obj_a, name) != getattr(obj_b, name):
return False
return True
@base.NovaObjectRegistry.register
class PciDevice(base.NovaPersistentObject, base.NovaObject):
"""Object to represent a PCI device on a compute node.
PCI devices are managed by the compute resource tracker, which discovers
the devices from the hardware platform, claims, allocates and frees
devices for instances.
The PCI device information is permanently maintained in a database.
This makes it convenient to get PCI device information, like physical
function for a VF device, adjacent switch IP address for a NIC,
hypervisor identification for a PCI device, etc. It also provides a
convenient way to check device allocation information for administrator
purposes.
A device can be in available/claimed/allocated/deleted/removed state.
    A device is available when it is discovered.
A device is claimed prior to being allocated to an instance. Normally the
transition from claimed to allocated is quick. However, during a resize
operation the transition can take longer, because devices are claimed in
prep_resize and allocated in finish_resize.
    A device becomes removed when it is hot-removed from a node (i.e. not found in
the next auto-discover) but not yet synced with the DB. A removed device
should not be allocated to any instance, and once deleted from the DB,
the device object is changed to deleted state and no longer synced with
the DB.
    Field notes::
| 'dev_id':
| Hypervisor's identification for the device, the string format
| is hypervisor specific
| 'extra_info':
| Device-specific properties like PF address, switch ip address etc.
"""
# Version 1.0: Initial version
# Version 1.1: String attributes updated to support unicode
# Version 1.2: added request_id field
# Version 1.3: Added field to represent PCI device NUMA node
VERSION = '1.3'
fields = {
'id': fields.IntegerField(),
# Note(yjiang5): the compute_node_id may be None because the pci
# device objects are created before the compute node is created in DB
'compute_node_id': fields.IntegerField(nullable=True),
'address': fields.StringField(),
'vendor_id': fields.StringField(),
'product_id': fields.StringField(),
'dev_type': fields.PciDeviceTypeField(),
'status': fields.PciDeviceStatusField(),
'dev_id': fields.StringField(nullable=True),
'label': fields.StringField(nullable=True),
'instance_uuid': fields.StringField(nullable=True),
'request_id': fields.StringField(nullable=True),
'extra_info': fields.DictOfStringsField(),
'numa_node': fields.IntegerField(nullable=True),
}
def obj_make_compatible(self, primitive, target_version):
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 2) and 'request_id' in primitive:
del primitive['request_id']
def update_device(self, dev_dict):
"""Sync the content from device dictionary to device object.
The resource tracker updates the available devices periodically.
To avoid meaningless syncs with the database, we update the device
object only if a value changed.
"""
# Note(yjiang5): status/instance_uuid should only be updated by
# functions like claim/allocate etc. The id is allocated by
# database. The extra_info is created by the object.
no_changes = ('status', 'instance_uuid', 'id', 'extra_info')
        # Use an explicit loop: map() is lazy on Python 3 and would not
        # actually pop anything.
        for key in no_changes:
            dev_dict.pop(key, None)
for k, v in dev_dict.items():
if k in self.fields.keys():
setattr(self, k, v)
else:
# Note (yjiang5) extra_info.update does not update
# obj_what_changed, set it explicitly
extra_info = self.extra_info
extra_info.update({k: v})
self.extra_info = extra_info
def __init__(self, *args, **kwargs):
super(PciDevice, self).__init__(*args, **kwargs)
self.obj_reset_changes()
self.extra_info = {}
def __eq__(self, other):
return compare_pci_device_attributes(self, other)
def __ne__(self, other):
return not (self == other)
@staticmethod
def _from_db_object(context, pci_device, db_dev):
for key in pci_device.fields:
if key != 'extra_info':
setattr(pci_device, key, db_dev[key])
else:
extra_info = db_dev.get("extra_info")
pci_device.extra_info = jsonutils.loads(extra_info)
pci_device._context = context
pci_device.obj_reset_changes()
return pci_device
@base.remotable_classmethod
def get_by_dev_addr(cls, context, compute_node_id, dev_addr):
db_dev = db.pci_device_get_by_addr(
context, compute_node_id, dev_addr)
return cls._from_db_object(context, cls(), db_dev)
@base.remotable_classmethod
def get_by_dev_id(cls, context, id):
db_dev = db.pci_device_get_by_id(context, id)
return cls._from_db_object(context, cls(), db_dev)
@classmethod
def create(cls, dev_dict):
"""Create a PCI device based on hypervisor information.
As the device object is just created and is not synced with db yet
thus we should not reset changes here for fields from dict.
"""
pci_device = cls()
pci_device.update_device(dev_dict)
pci_device.status = fields.PciDeviceStatus.AVAILABLE
return pci_device
@base.remotable
def save(self):
if self.status == fields.PciDeviceStatus.REMOVED:
self.status = fields.PciDeviceStatus.DELETED
db.pci_device_destroy(self._context, self.compute_node_id,
self.address)
elif self.status != fields.PciDeviceStatus.DELETED:
updates = self.obj_get_changes()
if 'extra_info' in updates:
updates['extra_info'] = jsonutils.dumps(updates['extra_info'])
if updates:
db_pci = db.pci_device_update(self._context,
self.compute_node_id,
self.address, updates)
self._from_db_object(self._context, self, db_pci)
def claim(self, instance):
if self.status != fields.PciDeviceStatus.AVAILABLE:
raise exception.PciDeviceInvalidStatus(
compute_node_id=self.compute_node_id,
address=self.address, status=self.status,
hopestatus=[fields.PciDeviceStatus.AVAILABLE])
self.status = fields.PciDeviceStatus.CLAIMED
self.instance_uuid = instance['uuid']
def allocate(self, instance):
ok_statuses = (fields.PciDeviceStatus.AVAILABLE,
fields.PciDeviceStatus.CLAIMED)
if self.status not in ok_statuses:
raise exception.PciDeviceInvalidStatus(
compute_node_id=self.compute_node_id,
address=self.address, status=self.status,
hopestatus=ok_statuses)
if (self.status == fields.PciDeviceStatus.CLAIMED and
self.instance_uuid != instance['uuid']):
raise exception.PciDeviceInvalidOwner(
compute_node_id=self.compute_node_id,
address=self.address, owner=self.instance_uuid,
hopeowner=instance['uuid'])
self.status = fields.PciDeviceStatus.ALLOCATED
self.instance_uuid = instance['uuid']
# Notes(yjiang5): remove this check when instance object for
# compute manager is finished
if isinstance(instance, dict):
if 'pci_devices' not in instance:
instance['pci_devices'] = []
instance['pci_devices'].append(copy.copy(self))
else:
instance.pci_devices.objects.append(copy.copy(self))
def remove(self):
if self.status != fields.PciDeviceStatus.AVAILABLE:
raise exception.PciDeviceInvalidStatus(
compute_node_id=self.compute_node_id,
address=self.address, status=self.status,
hopestatus=[fields.PciDeviceStatus.AVAILABLE])
self.status = fields.PciDeviceStatus.REMOVED
self.instance_uuid = None
self.request_id = None
def free(self, instance=None):
ok_statuses = (fields.PciDeviceStatus.ALLOCATED,
fields.PciDeviceStatus.CLAIMED)
if self.status not in ok_statuses:
raise exception.PciDeviceInvalidStatus(
compute_node_id=self.compute_node_id,
address=self.address, status=self.status,
hopestatus=ok_statuses)
if instance and self.instance_uuid != instance['uuid']:
raise exception.PciDeviceInvalidOwner(
compute_node_id=self.compute_node_id,
address=self.address, owner=self.instance_uuid,
hopeowner=instance['uuid'])
old_status = self.status
self.status = fields.PciDeviceStatus.AVAILABLE
self.instance_uuid = None
self.request_id = None
if old_status == fields.PciDeviceStatus.ALLOCATED and instance:
# Notes(yjiang5): remove this check when instance object for
# compute manager is finished
existed = next((dev for dev in instance['pci_devices']
if dev.id == self.id))
if isinstance(instance, dict):
instance['pci_devices'].remove(existed)
else:
instance.pci_devices.objects.remove(existed)
@base.NovaObjectRegistry.register
class PciDeviceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# PciDevice <= 1.1
# Version 1.1: PciDevice 1.2
# Version 1.2: PciDevice 1.3
VERSION = '1.2'
fields = {
'objects': fields.ListOfObjectsField('PciDevice'),
}
def __init__(self, *args, **kwargs):
super(PciDeviceList, self).__init__(*args, **kwargs)
self.objects = []
self.obj_reset_changes()
@base.remotable_classmethod
def get_by_compute_node(cls, context, node_id):
db_dev_list = db.pci_device_get_all_by_node(context, node_id)
return base.obj_make_list(context, cls(context), objects.PciDevice,
db_dev_list)
@base.remotable_classmethod
def get_by_instance_uuid(cls, context, uuid):
db_dev_list = db.pci_device_get_all_by_instance_uuid(context, uuid)
return base.obj_make_list(context, cls(context), objects.PciDevice,
db_dev_list)
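# Illustrative sketch (not part of Nova): the claim -> allocate -> free
# lifecycle described in the PciDevice docstring.  The device data and the
# instance dict below are hypothetical, and nothing here touches the DB.
def _example_pci_lifecycle():
    dev = PciDevice.create({'address': '0000:00:1f.2',
                            'vendor_id': '8086',
                            'product_id': '1521'})
    dev.id = 1  # normally assigned by the database on save()
    instance = {'uuid': 'fake-instance-uuid', 'pci_devices': []}
    dev.claim(instance)     # AVAILABLE -> CLAIMED
    dev.allocate(instance)  # CLAIMED -> ALLOCATED; a copy lands on the instance
    dev.free(instance)      # ALLOCATED -> AVAILABLE again
    return dev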
|
ciex/motor
|
refs/heads/master
|
lib/werkzeug/contrib/atom.py
|
90
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.atom
~~~~~~~~~~~~~~~~~~~~~
This module provides a class called :class:`AtomFeed` which can be
used to generate feeds in the Atom syndication format (see :rfc:`4287`).
Example::
def atom_feed(request):
feed = AtomFeed("My Blog", feed_url=request.url,
url=request.host_url,
subtitle="My example blog for a feed test.")
for post in Post.query.limit(10).all():
feed.add(post.title, post.body, content_type='html',
author=post.author, url=post.url, id=post.uid,
updated=post.last_update, published=post.pub_date)
return feed.get_response()
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime
from werkzeug.utils import escape
from werkzeug.wrappers import BaseResponse
XHTML_NAMESPACE = 'http://www.w3.org/1999/xhtml'
def _make_text_block(name, content, content_type=None):
"""Helper function for the builder that creates an XML text block."""
if content_type == 'xhtml':
return u'<%s type="xhtml"><div xmlns="%s">%s</div></%s>\n' % \
(name, XHTML_NAMESPACE, content, name)
if not content_type:
return u'<%s>%s</%s>\n' % (name, escape(content), name)
return u'<%s type="%s">%s</%s>\n' % (name, content_type,
escape(content), name)
def format_iso8601(obj):
"""Format a datetime object for iso8601"""
return obj.strftime('%Y-%m-%dT%H:%M:%SZ')
class AtomFeed(object):
"""A helper class that creates Atom feeds.
:param title: the title of the feed. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the feed (not the url *of* the feed)
    :param id: a globally unique id for the feed. Must be a URI. If
not present the `feed_url` is used, but one of both is
required.
    :param updated: the time the feed was last modified. Must
be a :class:`datetime.datetime` object. If not
present the latest entry's `updated` is used.
:param feed_url: the URL to the feed. Should be the URL that was
requested.
:param author: the author of the feed. Must be either a string (the
name) or a dict with name (required) and uri or
                   email (both optional). Can also be a list of (possibly
                   mixed) strings and dicts if there are multiple
                   authors. Required if not every entry has an
author element.
:param icon: an icon for the feed.
:param logo: a logo for the feed.
:param rights: copyright information for the feed.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param subtitle: a short description of the feed.
:param subtitle_type: the type attribute for the subtitle element.
                          One of ``'text'``, ``'html'``
                          or ``'xhtml'``. Default is ``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param generator: the software that generated this feed. This must be
a tuple in the form ``(name, url, version)``. If
you don't want to specify one of them, set the item
to `None`.
:param entries: a list with the entries for the feed. Entries can also
be added later with :meth:`add`.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
default_generator = ('Werkzeug', None, None)
def __init__(self, title=None, entries=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.url = kwargs.get('url')
self.feed_url = kwargs.get('feed_url', self.url)
self.id = kwargs.get('id', self.feed_url)
self.updated = kwargs.get('updated')
self.author = kwargs.get('author', ())
self.icon = kwargs.get('icon')
self.logo = kwargs.get('logo')
self.rights = kwargs.get('rights')
self.rights_type = kwargs.get('rights_type')
self.subtitle = kwargs.get('subtitle')
self.subtitle_type = kwargs.get('subtitle_type', 'text')
self.generator = kwargs.get('generator')
if self.generator is None:
self.generator = self.default_generator
self.links = kwargs.get('links', [])
self.entries = entries and list(entries) or []
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
for author in self.author:
if 'name' not in author:
raise TypeError('author must contain at least a name')
def add(self, *args, **kwargs):
"""Add a new entry to the feed. This function can either be called
with a :class:`FeedEntry` or some keyword and positional arguments
that are forwarded to the :class:`FeedEntry` constructor.
"""
if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
self.entries.append(args[0])
else:
kwargs['feed_url'] = self.feed_url
self.entries.append(FeedEntry(*args, **kwargs))
def __repr__(self):
return '<%s %r (%d entries)>' % (
self.__class__.__name__,
self.title,
len(self.entries)
)
def generate(self):
"""Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
if False in map(lambda e: bool(e.author), self.entries):
self.author = ({'name': 'Unknown author'},)
if not self.updated:
dates = sorted([entry.updated for entry in self.entries])
self.updated = dates and dates[-1] or datetime.utcnow()
yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url, True)
if self.feed_url:
yield u' <link href="%s" rel="self" />\n' % \
escape(self.feed_url, True)
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield ' <email>%s</email>\n' % escape(author['email'])
yield ' </author>\n'
if self.subtitle:
yield ' ' + _make_text_block('subtitle', self.subtitle,
self.subtitle_type)
if self.icon:
yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
yield ' ' + _make_text_block('rights', self.rights,
self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
tmp = [u' <generator']
if generator_url:
tmp.append(u' uri="%s"' % escape(generator_url, True))
if generator_version:
tmp.append(u' version="%s"' % escape(generator_version, True))
tmp.append(u'>%s</generator>\n' % escape(generator_name))
yield u''.join(tmp)
for entry in self.entries:
for line in entry.generate():
yield u' ' + line
yield u'</feed>\n'
def to_string(self):
"""Convert the feed into a string."""
return u''.join(self.generate())
def get_response(self):
"""Return a response object for the feed."""
return BaseResponse(self.to_string(), mimetype='application/atom+xml')
def __call__(self, environ, start_response):
"""Use the class as WSGI response object."""
return self.get_response()(environ, start_response)
def __unicode__(self):
return self.to_string()
def __str__(self):
return self.to_string().encode('utf-8')
class FeedEntry(object):
"""Represents a single entry in a feed.
:param title: the title of the entry. Required.
:param title_type: the type attribute for the title element. One of
``'html'``, ``'text'`` or ``'xhtml'``.
:param content: the content of the entry.
:param content_type: the type attribute for the content element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param summary: a summary of the entry's content.
:param summary_type: the type attribute for the summary element. One
of ``'html'``, ``'text'`` or ``'xhtml'``.
:param url: the url for the entry.
    :param id: a globally unique id for the entry. Must be a URI. If
not present the URL is used, but one of both is required.
    :param updated: the time the entry was last modified. Must
be a :class:`datetime.datetime` object. Required.
    :param author: the author of the entry. Must be either a string (the
                   name) or a dict with name (required) and uri or
                   email (both optional). Can also be a list of (possibly
                   mixed) strings and dicts if there are multiple
                   authors. Required if not every entry has an
author element.
:param published: the time the entry was initially published. Must
be a :class:`datetime.datetime` object.
:param rights: copyright information for the entry.
:param rights_type: the type attribute for the rights element. One of
``'html'``, ``'text'`` or ``'xhtml'``. Default is
``'text'``.
:param links: additional links. Must be a list of dictionaries with
href (required) and rel, type, hreflang, title, length
(all optional)
:param xml_base: The xml base (url) for this feed item. If not provided
it will default to the item url.
For more information on the elements see
http://www.atomenabled.org/developers/syndication/
Everywhere where a list is demanded, any iterable can be used.
"""
def __init__(self, title=None, content=None, feed_url=None, **kwargs):
self.title = title
self.title_type = kwargs.get('title_type', 'text')
self.content = content
self.content_type = kwargs.get('content_type', 'html')
self.url = kwargs.get('url')
self.id = kwargs.get('id', self.url)
self.updated = kwargs.get('updated')
self.summary = kwargs.get('summary')
self.summary_type = kwargs.get('summary_type', 'html')
self.author = kwargs.get('author')
self.published = kwargs.get('published')
self.rights = kwargs.get('rights')
self.links = kwargs.get('links', [])
self.xml_base = kwargs.get('xml_base', feed_url)
if not hasattr(self.author, '__iter__') \
or isinstance(self.author, (basestring, dict)):
self.author = [self.author]
for i, author in enumerate(self.author):
if not isinstance(author, dict):
self.author[i] = {'name': author}
if not self.title:
raise ValueError('title is required')
if not self.id:
raise ValueError('id is required')
if not self.updated:
raise ValueError('updated is required')
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.title
)
def generate(self):
"""Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
base = ' xml:base="%s"' % escape(self.xml_base, True)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated)
if self.published:
yield u' <published>%s</published>\n' % \
format_iso8601(self.published)
if self.url:
yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
yield u' <author>\n'
yield u' <name>%s</name>\n' % escape(author['name'])
if 'uri' in author:
yield u' <uri>%s</uri>\n' % escape(author['uri'])
if 'email' in author:
yield u' <email>%s</email>\n' % escape(author['email'])
yield u' </author>\n'
for link in self.links:
yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
(k, escape(link[k], True)) for k in link)
if self.summary:
yield u' ' + _make_text_block('summary', self.summary,
self.summary_type)
if self.content:
yield u' ' + _make_text_block('content', self.content,
self.content_type)
yield u'</entry>\n'
def to_string(self):
"""Convert the feed item into a unicode object."""
return u''.join(self.generate())
def __unicode__(self):
return self.to_string()
def __str__(self):
return self.to_string().encode('utf-8')
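# Illustrative sketch (not part of Werkzeug): building a feed by hand and
# serializing it.  The titles, URLs and dates below are made up.
if __name__ == '__main__':
    feed = AtomFeed('Example feed', feed_url='http://example.com/feed.atom',
                    url='http://example.com/', author='Example Author')
    feed.add(FeedEntry('First post', 'Hello, world!', content_type='text',
                       author='Example Author',
                       url='http://example.com/posts/1',
                       updated=datetime(2011, 1, 1, 12, 0, 0)))
    print(feed.to_string())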
|
JioCloud/neutron
|
refs/heads/master
|
neutron/plugins/metaplugin/meta_neutron_plugin.py
|
21
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from metaplugin.plugin import meta_neutron_plugin
MetaPluginV2 = meta_neutron_plugin.MetaPluginV2
|
JshWright/home-assistant
|
refs/heads/dev
|
homeassistant/components/sensor/verisure.py
|
2
|
"""
Interfaces with Verisure sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.verisure/
"""
import logging
from homeassistant.components.verisure import HUB as hub
from homeassistant.components.verisure import (
CONF_THERMOMETERS, CONF_HYDROMETERS, CONF_MOUSE)
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Verisure platform."""
sensors = []
if int(hub.config.get(CONF_THERMOMETERS, 1)):
hub.update_climate()
sensors.extend([
VerisureThermometer(value.id)
for value in hub.climate_status.values()
if hasattr(value, 'temperature') and value.temperature
])
if int(hub.config.get(CONF_HYDROMETERS, 1)):
hub.update_climate()
sensors.extend([
VerisureHygrometer(value.id)
for value in hub.climate_status.values()
if hasattr(value, 'humidity') and value.humidity
])
if int(hub.config.get(CONF_MOUSE, 1)):
hub.update_mousedetection()
sensors.extend([
VerisureMouseDetection(value.deviceLabel)
for value in hub.mouse_status.values()
# is this if needed?
if hasattr(value, 'amountText') and value.amountText
])
add_devices(sensors)
class VerisureThermometer(Entity):
"""Representation of a Verisure thermometer."""
def __init__(self, device_id):
"""Initialize the sensor."""
self._id = device_id
@property
def name(self):
"""Return the name of the device."""
return '{} {}'.format(
hub.climate_status[self._id].location, 'Temperature')
@property
def state(self):
"""Return the state of the device."""
# Remove ° character
return hub.climate_status[self._id].temperature[:-1]
@property
def available(self):
"""Return True if entity is available."""
return hub.available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return TEMP_CELSIUS
def update(self):
"""Update the sensor."""
hub.update_climate()
class VerisureHygrometer(Entity):
"""Representation of a Verisure hygrometer."""
def __init__(self, device_id):
"""Initialize the sensor."""
self._id = device_id
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(
hub.climate_status[self._id].location, 'Humidity')
@property
def state(self):
"""Return the state of the sensor."""
# remove % character
return hub.climate_status[self._id].humidity[:-1]
@property
def available(self):
"""Return True if entity is available."""
return hub.available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this sensor."""
return "%"
def update(self):
"""Update the sensor."""
hub.update_climate()
class VerisureMouseDetection(Entity):
"""Representation of a Verisure mouse detector."""
def __init__(self, device_id):
"""Initialize the sensor."""
self._id = device_id
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(
hub.mouse_status[self._id].location, 'Mouse')
@property
def state(self):
"""Return the state of the sensor."""
return hub.mouse_status[self._id].count
@property
def available(self):
"""Return True if entity is available."""
return hub.available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this sensor."""
return "Mice"
def update(self):
"""Update the sensor."""
hub.update_mousedetection()
|
th0mmeke/toyworld
|
refs/heads/master
|
runner.py
|
1
|
"""
Created on 1/03/2013
@author: thom
"""
from experiment import Experiment
from parameters import Parameters
from rdkit.rdBase import DisableLog, EnableLog
import logging
import collections
import importlib
class Runner(object):
"""
Run a series of experiments.
"""
def __init__(self, parameters, dirname):
self._parameters = parameters
self._dirname = dirname
self._factor_definitions = {}
if self._parameters.find("Factors") is not None:
# set up factors list
factors_xml = self._parameters.find('Factors').findall('Factor')
for factor_xml in factors_xml:
factor = {}
for f in factor_xml:
if f.tag == 'Title':
factor[f.tag] = f.text
else:
factor[f.tag] = list(f)
self._factor_definitions[factor_xml.get('key')] = factor
logging.info("Experiment factors: {}".format(self._factor_definitions))
def get_factor_definitions(self):
return self._factor_definitions
def get_experiments(self):
experiments = self._parameters.findall("Experiment") # might have multiple experiment blocks
return collections.deque([Experiment(self._dirname, xml, list(self._parameters.find("Common")), self._factor_definitions) for xml in experiments])
def get_evaluations(self):
evaluations = []
for xml in self._parameters.find("Evaluation"): # one evaluation block, but with multiple methods
evaluator_module, evaluator_class = xml.text.rsplit(".", 1)
evaluator_module = evaluator_module.rstrip()
evaluator_class = evaluator_class.rstrip()
partition_input = Parameters.convert_if_boolean(xml.get('partition')) # set flag in evaluator if requested to partition the input into sample blocks
evaluations.append(getattr(importlib.import_module(evaluator_module), evaluator_class)(partition=partition_input))
return evaluations
def run(self):
"""
Expand DoE XML design into canonical experiment format
DoE format has <Factors> block, defining the varying factors, and a <Common> block with static elements of the design
Each experiment has a set of <factor key=<key> value=<value>/> elements which need to be expanded
We also need to copy in the common elements
"""
experiments = self.get_experiments()
DisableLog('rdApp.error') # disable rdKit error messages, in particular "Explicit valence..."
logging.info("Running {} experiment{}".format(len(experiments), "s" if len(experiments) > 1 else ""))
number_of_experiments = len(experiments)
for i in range(number_of_experiments):
experiment = experiments.popleft()
logging.info("Starting experiment #{}".format(i + 1))
experiment.run()
del experiment
logging.info("Finished running experiment{}".format("s" if len(experiments) > 1 else ""))
EnableLog('rdApp.error')
|
Wafflespeanut/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/py/py/_process/killproc.py
|
278
|
import py
import os, sys
if sys.platform == "win32" or getattr(os, '_name', '') == 'nt':
try:
import ctypes
except ImportError:
def dokill(pid):
py.process.cmdexec("taskkill /F /PID %d" %(pid,))
else:
def dokill(pid):
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(
PROCESS_TERMINATE, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
def dokill(pid):
os.kill(pid, 15)
def kill(pid):
""" kill process by id. """
dokill(pid)
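# Illustrative sketch (not part of py): force-terminating a child process by
# pid in a platform-independent way.
if __name__ == '__main__':
    import subprocess, time
    child = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(60)'])
    kill(child.pid)
    time.sleep(0.5)
    print('child terminated, return code: %s' % child.poll())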
|
LohithBlaze/scikit-learn
|
refs/heads/master
|
sklearn/qda.py
|
140
|
"""
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD 3 clause
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .externals.six.moves import xrange
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
"""
Quadratic Discriminant Analysis (QDA)
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
Examples
--------
>>> from sklearn.qda import QDA
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QDA()
>>> clf.fit(X, y)
QDA(priors=None, reg_param=0.0)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.lda.LDA: Linear discriminant analysis
"""
def __init__(self, priors=None, reg_param=0.):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
def fit(self, X, y, store_covariances=False, tol=1.0e-4):
"""
Fit the QDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
"""
X, y = check_X_y(X, y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
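# Illustrative sketch (not part of scikit-learn): the docstring example above,
# extended to the probabilistic and decision-function outputs.
if __name__ == '__main__':
    X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    y = np.array([1, 1, 1, 2, 2, 2])
    clf = QDA().fit(X, y)
    print(clf.predict([[-0.8, -1]]))            # -> [1]
    print(clf.predict_proba([[-0.8, -1]]))      # class posteriors, sum to 1
    print(clf.decision_function([[-0.8, -1]]))  # 2-class: log-likelihood ratio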
|
earthreader/libearth
|
refs/heads/master
|
tests/session_test.py
|
2
|
from __future__ import print_function
import collections
import datetime
import operator
import sys
import time
from pytest import fixture, mark, raises
from libearth.codecs import Integer
from libearth.compat import binary
from libearth.schema import Attribute, Child, Content, Text, Element
from libearth.session import (SESSION_XMLNS, MergeableDocumentElement, Revision,
RevisionCodec, RevisionSet, RevisionSetCodec,
Session, ensure_revision_pair, parse_revision)
from libearth.tz import now, utc
def test_intern():
"""Session of the same identifier cannot be multiple."""
session = Session('id1')
assert session is Session('id1')
assert session is not Session('id2')
assert session == Session('id1')
assert session != Session('id2')
assert hash(session) == hash(Session('id1'))
assert hash(session) != hash(Session('id2'))
def test_default_identifier():
assert Session().identifier != ''
assert Session().identifier is not None
assert Session().identifier != Session.identifier
def test_invalid_identifier():
with raises(ValueError):
Session('i n v a l i d')
with raises(ValueError):
Session('i*n*v*a*l*i*d')
with raises(ValueError):
Session('i+n+v+a+l+i+d')
with raises(ValueError):
Session('i/n/v/a/l/i/d')
with raises(ValueError):
Session('i\nn\nv\na\nl\ni\nd')
with raises(ValueError):
Session('i\tn\tv\ta\tl\ti\td')
with raises(ValueError):
Session('(invalid)')
Session('valid')
Session('v-a-l-i-d')
Session('v.a.l.i.d')
Session('v_a_l_i_d')
Session('v1a2l3i4d')
Session('v-a.l_i4d')
def test_revision():
session = Session()
updated_at = now()
rev = Revision(session, updated_at)
assert rev == (session, updated_at)
assert rev[0] is rev.session is session
assert rev[1] == rev.updated_at == updated_at
def test_ensure_revision_pair():
session = Session()
updated_at = now()
assert ensure_revision_pair((session, updated_at)) == (session, updated_at)
pair = ensure_revision_pair((session, updated_at), force_cast=True)
assert isinstance(pair, Revision)
assert pair == (session, updated_at)
with raises(TypeError):
ensure_revision_pair(())
with raises(TypeError):
ensure_revision_pair((session,))
with raises(TypeError):
ensure_revision_pair((session, updated_at, 1))
with raises(TypeError):
ensure_revision_pair(session)
with raises(TypeError):
ensure_revision_pair((session, 1))
with raises(TypeError):
ensure_revision_pair((1, updated_at))
@fixture
def fx_revision_set():
dt = datetime.datetime
return RevisionSet([
(Session('key1'), dt(2013, 9, 22, 16, 58, 57, tzinfo=utc)),
(Session('key2'), dt(2013, 9, 22, 16, 59, 30, tzinfo=utc)),
(Session('key3'), dt(2013, 9, 22, 17, 0, 30, tzinfo=utc)),
(Session('key4'), dt(2013, 9, 22, 17, 10, 30, tzinfo=utc))
])
def test_revision_set(fx_revision_set):
assert isinstance(fx_revision_set, collections.Mapping)
assert len(fx_revision_set) == 4
assert set(fx_revision_set) == set([Session('key1'), Session('key2'),
Session('key3'), Session('key4')])
assert (fx_revision_set[Session('key1')] ==
datetime.datetime(2013, 9, 22, 16, 58, 57, tzinfo=utc))
assert (fx_revision_set[Session('key2')] ==
datetime.datetime(2013, 9, 22, 16, 59, 30, tzinfo=utc))
for pair in fx_revision_set.items():
assert isinstance(pair, Revision)
assert fx_revision_set
assert not RevisionSet()
def test_revision_set_copy(fx_revision_set):
clone = fx_revision_set.copy()
assert isinstance(clone, RevisionSet)
assert clone == fx_revision_set
assert clone is not fx_revision_set
def test_revision_set_merge(fx_revision_set):
dt = datetime.datetime
initial = fx_revision_set.copy()
with raises(TypeError):
fx_revision_set.merge()
with raises(TypeError):
fx_revision_set.merge(fx_revision_set, [])
assert fx_revision_set.merge(fx_revision_set) == fx_revision_set
assert fx_revision_set == initial
session_a = Session()
session_b = Session()
merged = fx_revision_set.merge(
RevisionSet([
(Session('key1'), dt(2013, 9, 23, 18, 40, 48, tzinfo=utc)),
(Session('key2'), dt(2012, 9, 23, 18, 40, 48, tzinfo=utc)),
(session_a, dt(2013, 9, 23, 18, 40, 48, tzinfo=utc)),
(session_b, dt(2013, 9, 23, 18, 41, 00, tzinfo=utc))
])
)
assert merged == RevisionSet([
(Session('key1'), dt(2013, 9, 23, 18, 40, 48, tzinfo=utc)),
(Session('key2'), dt(2013, 9, 22, 16, 59, 30, tzinfo=utc)),
(Session('key3'), dt(2013, 9, 22, 17, 0, 30, tzinfo=utc)),
(Session('key4'), dt(2013, 9, 22, 17, 10, 30, tzinfo=utc)),
(session_a, dt(2013, 9, 23, 18, 40, 48, tzinfo=utc)),
(session_b, dt(2013, 9, 23, 18, 41, 00, tzinfo=utc))
])
assert fx_revision_set == initial
merged = fx_revision_set.merge(
RevisionSet([
(Session('key1'), dt(2013, 9, 23, 18, 40, 48, tzinfo=utc)),
(Session('key2'), dt(2012, 9, 23, 18, 40, 48, tzinfo=utc)),
(session_a, dt(2013, 9, 23, 18, 40, 48, tzinfo=utc))
]),
RevisionSet([
(Session('key3'), dt(2012, 9, 22, 17, 0, 30, tzinfo=utc)),
(Session('key4'), dt(2013, 9, 23, 19, 10, 30, tzinfo=utc)),
(session_a, dt(2013, 9, 23, 19, 8, 47, tzinfo=utc))
])
)
assert merged == RevisionSet([
(Session('key1'), dt(2013, 9, 23, 18, 40, 48, tzinfo=utc)),
(Session('key2'), dt(2013, 9, 22, 16, 59, 30, tzinfo=utc)),
(Session('key3'), dt(2013, 9, 22, 17, 0, 30, tzinfo=utc)),
(Session('key4'), dt(2013, 9, 23, 19, 10, 30, tzinfo=utc)),
(session_a, dt(2013, 9, 23, 19, 8, 47, tzinfo=utc))
])
def test_revision_set_contains(fx_revision_set):
assert not fx_revision_set.contains(Revision(Session('key0'), now()))
assert not fx_revision_set.contains(
Revision(Session('key1'),
datetime.datetime(2013, 9, 27, 16, 54, 50, tzinfo=utc))
)
assert fx_revision_set.contains(
Revision(Session('key1'),
datetime.datetime(2013, 9, 22, 16, 58, 57, tzinfo=utc))
)
assert fx_revision_set.contains(
Revision(Session('key1'),
datetime.datetime(2012, 9, 22, 16, 58, 57, tzinfo=utc))
)
assert not fx_revision_set.contains(
Revision(Session('key0'),
datetime.datetime(2012, 9, 22, 16, 58, 57, tzinfo=utc))
)
def test_revision_codec():
session = Session('test-identifier')
updated_at = datetime.datetime(2013, 9, 22, 3, 43, 40, tzinfo=utc)
rev = Revision(session, updated_at)
codec = RevisionCodec()
assert codec.encode(rev) == 'test-identifier 2013-09-22T03:43:40Z'
assert codec.encode(tuple(rev)) == 'test-identifier 2013-09-22T03:43:40Z'
decoded = codec.decode('test-identifier 2013-09-22T03:43:40Z')
assert decoded == rev
assert decoded.session is session
assert decoded.updated_at == updated_at
def test_revision_set_codec(fx_revision_set):
codec = RevisionSetCodec()
expected = '''key4 2013-09-22T17:10:30Z,
key3 2013-09-22T17:00:30Z,
key2 2013-09-22T16:59:30Z,
key1 2013-09-22T16:58:57Z'''
assert codec.encode(fx_revision_set) == expected
assert codec.decode(expected) == fx_revision_set
class TestUniqueEntity(Element):
ident = Text('ident', required=True)
value = Attribute('value')
def __entity_id__(self):
return self.ident
class TestRevisedEntity(Element):
ident = Text('ident', required=True)
rev = Attribute('rev', Integer)
value = Attribute('value')
def __entity_id__(self):
return self.ident
def __merge_entities__(self, other):
return max(self, other, key=operator.attrgetter('rev'))
class TestMergeableDoc(MergeableDocumentElement):
__tag__ = 'merge-test'
multi_text = Text('multi-text', multiple=True)
text = Text('text')
attr = Attribute('attr')
unique_entities = Child('unique-entity', TestUniqueEntity, multiple=True)
rev_entities = Child('rev-multi-entity', TestRevisedEntity, multiple=True)
rev_entity = Child('rev-single-entity', TestRevisedEntity)
nullable = Child('nullable-entity', TestUniqueEntity)
class TestMergeableContentDoc(MergeableDocumentElement):
__tag__ = 'merge-content-test'
content = Content()
def test_session_revise():
doc = TestMergeableDoc()
min_updated_at = now()
session = Session()
session.revise(doc)
assert isinstance(doc.__revision__, Revision)
assert doc.__revision__.session is session
assert min_updated_at <= doc.__revision__.updated_at <= now()
time.sleep(0.1)
min_updated_at = now()
session.revise(doc)
assert min_updated_at <= doc.__revision__.updated_at <= now()
@mark.parametrize('revised', [True, False])
def test_session_pull(revised):
s1 = Session('s1')
s2 = Session('s2')
a = TestMergeableDoc(multi_text=['a', 'b', 'c'])
if revised:
s1.revise(a)
b = s2.pull(a)
assert b is not a
assert b.__revision__.session is s2
if revised:
assert b.__revision__.updated_at == a.__revision__.updated_at
assert b.multi_text == ['a', 'b', 'c']
assert a.multi_text is not b.multi_text
if revised:
assert a.__revision__.session is s1
def test_session_pull_same_session():
session = Session('s1')
doc = TestMergeableDoc()
session.revise(doc)
assert session.pull(doc) is doc
def wait():
    # Windows' datetime.now() doesn't provide enough precision.
if sys.platform == 'win32':
time.sleep(0.5)
def test_session_merge():
# s1 s2
# ------
# (1) a b (2)
# | / |
# (3) c b (4)
# | \ |
# | d (5)
# | /
# (5) e
s1 = Session('s1')
a = TestMergeableDoc(
attr='a',
text='a',
multi_text=['a', 'b', 'c'],
unique_entities=[
TestUniqueEntity(ident='a', value='s1-a'),
TestUniqueEntity(ident='b', value='s1-b'),
TestUniqueEntity(ident='c', value='s1-c')
],
rev_entities=[
TestRevisedEntity(ident='a', value='s1-a', rev=2),
TestRevisedEntity(ident='b', value='s1-b', rev=2),
TestRevisedEntity(ident='c', value='s1-c', rev=2)
],
rev_entity=TestRevisedEntity(ident='a', value='s1', rev=1)
)
a_c = TestMergeableContentDoc(content='a')
s1.revise(a) # (1)
s1.revise(a_c)
wait()
s2 = Session('s2')
b = TestMergeableDoc(
attr='b',
text='b',
multi_text=['d', 'e', 'f'],
unique_entities=[
TestUniqueEntity(ident='c', value='s2-c'),
TestUniqueEntity(ident='d', value='s2-d'),
TestUniqueEntity(ident='e', value='s2-e')
],
rev_entities=[
TestRevisedEntity(ident='b', value='s2-b', rev=1),
TestRevisedEntity(ident='c', value='s2-c', rev=3),
TestRevisedEntity(ident='d', value='s2-d', rev=2)
],
rev_entity=TestRevisedEntity(ident='a', value='s2', rev=2)
)
b_c = TestMergeableContentDoc(content='b')
s2.revise(b) # (2)
s2.revise(b_c)
wait()
c = s1.merge(b, a) # (3)
c_c = s1.merge(b_c, a_c)
wait()
assert c.__revision__.session is s1
assert c.__revision__.updated_at > a.__revision__.updated_at
assert c.__base_revisions__ == RevisionSet([a.__revision__, b.__revision__])
print((c.attr, c.text, c_c.content))
assert c.attr == c.text == c_c.content == 'b'
assert list(c.multi_text) == ['a', 'b', 'c', 'd', 'e', 'f']
assert ([entity.value for entity in c.unique_entities] ==
['s1-a', 's1-b', 's2-c', 's2-d', 's2-e'])
assert ([(e.value, e.rev) for e in c.rev_entities] ==
[('s1-a', 2), ('s1-b', 2), ('s2-c', 3), ('s2-d', 2)])
assert c.rev_entity.rev == 2
assert c.rev_entity.value == 's2'
assert c.nullable is None
c.nullable = TestUniqueEntity(ident='nullable', value='nullable')
b.attr = b.text = b_c.content = 'd'
b.multi_text.append('blah')
b.unique_entities.append(TestUniqueEntity(ident='blah', value='s2-blah'))
s2.revise(b) # (4)
s2.revise(b_c)
wait()
assert list(b.multi_text) == ['d', 'e', 'f', 'blah']
assert ([entity.value for entity in b.unique_entities] ==
['s2-c', 's2-d', 's2-e', 's2-blah'])
d = s2.merge(b, c) # (5)
d_c = s2.merge(b_c, c_c)
wait()
assert d.__revision__.session is s2
assert d.__revision__.updated_at >= c.__revision__.updated_at
assert d.__base_revisions__ == RevisionSet([b.__revision__, c.__revision__])
assert d.attr == d.text == d_c.content == 'd'
assert list(d.multi_text) == ['a', 'b', 'c', 'd', 'e', 'f', 'blah']
assert ([entity.value for entity in d.unique_entities] ==
['s1-a', 's1-b', 's2-c', 's2-d', 's2-e', 's2-blah'])
assert d.nullable is not None
assert d.nullable.value == 'nullable'
e = s1.merge(c, d) # (5)
e_c = s1.merge(c_c, d_c)
wait()
assert e.__revision__.session is s1
assert e.__revision__.updated_at == d.__revision__.updated_at
assert e.__base_revisions__ == d.__base_revisions__
assert e.attr == e.text == e_c.content == 'd'
assert list(e.multi_text) == ['a', 'b', 'c', 'd', 'e', 'f', 'blah']
assert ([entity.value for entity in d.unique_entities] ==
['s1-a', 's1-b', 's2-c', 's2-d', 's2-e', 's2-blah'])
@mark.parametrize(('iterable', 'rv'), [
(['<doc ', 'xmlns:s="', SESSION_XMLNS,
'" s:revision="test 2013-09-22T03:43:40Z" ', 's:bases="" ', '/>'],
(Revision(Session('test'),
datetime.datetime(2013, 9, 22, 3, 43, 40, tzinfo=utc)),
RevisionSet())),
(['<doc ', 'xmlns:s="', SESSION_XMLNS,
'" s:revision="test 2013-09-22T03:43:40Z" ', 's:bases="">',
'<a />', '</doc>'],
(Revision(Session('test'),
datetime.datetime(2013, 9, 22, 3, 43, 40, tzinfo=utc)),
RevisionSet())),
(['<doc ', 'xmlns:s="', SESSION_XMLNS,
'" s:revision="test 2013-09-22T03:43:40Z" ', 's:bases=""><a /></doc>'],
(Revision(Session('test'),
datetime.datetime(2013, 9, 22, 3, 43, 40, tzinfo=utc)),
RevisionSet())),
(['<?xml version="1.0" encoding="utf-8"?>\n', '<ns1:feed xmlns:ns0="',
SESSION_XMLNS, '" xmlns:ns1="http://www.w3.org/2005/Atom" ',
'xmlns:ns2="http://earthreader.org/mark/" ',
'ns0:bases="a 2013-11-17T16:36:46.003058Z" ',
'ns0:revision="a 2013-11-17T16:36:46.033062Z">', '</ns1:feed>'],
(Revision(Session('a'),
datetime.datetime(2013, 11, 17, 16, 36, 46, 33062, tzinfo=utc)),
RevisionSet([
Revision(
Session('a'),
datetime.datetime(2013, 11, 17, 16, 36, 46, 3058, tzinfo=utc)
)
]))),
(['<doc ', ' revision="test 2013-09-22T03:43:40Z" ', 'bases="" ', '/>'],
None),
(['<doc ', ' revision="test 2013-09-22T03:43:40Z" ', 'bases="">',
'<a />', '</doc>'], None),
(['<doc>', '<a />' '</doc>'], None),
(['<doc', ' />'], None),
])
def test_parse_revision(iterable, rv):
assert parse_revision(map(binary, iterable)) == rv
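# --- Hedged illustration (not part of the original test module) -------------
# parse_revision() above only needs the root element's s:revision / s:bases
# attributes in the SESSION_XMLNS namespace, so a minimal revised document can
# be sketched as a plain string.  'my-session' is a made-up identifier; this
# mirrors the first parametrized case above and is not an additional test.
_example_revised_doc = ('<doc xmlns:s="' + SESSION_XMLNS + '" '
                        's:revision="my-session 2013-09-22T03:43:40Z" '
                        's:bases="" />')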
|
datenbetrieb/odoo
|
refs/heads/8.0
|
addons/website_sale_delivery/models/sale_order.py
|
176
|
# -*- coding: utf-8 -*-
from openerp.osv import orm, fields
from openerp import SUPERUSER_ID
from openerp.addons import decimal_precision
from openerp.tools.translate import _
class delivery_carrier(orm.Model):
_inherit = 'delivery.carrier'
_columns = {
'website_published': fields.boolean('Available in the website', copy=False),
'website_description': fields.text('Description for the website'),
}
_defaults = {
'website_published': True
}
class SaleOrder(orm.Model):
_inherit = 'sale.order'
def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
""" Wrapper because of direct method passing as parameter for function fields """
return self._amount_all(cr, uid, ids, field_name, arg, context=context)
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = super(SaleOrder, self)._amount_all(cr, uid, ids, field_name, arg, context=context)
currency_pool = self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
line_amount = sum([line.price_subtotal for line in order.order_line if line.is_delivery])
currency = order.pricelist_id.currency_id
res[order.id]['amount_delivery'] = currency_pool.round(cr, uid, currency, line_amount)
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
_columns = {
'amount_delivery': fields.function(
_amount_all_wrapper, type='float', digits_compute=decimal_precision.get_precision('Account'),
string='Delivery Amount',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The amount without tax.", track_visibility='always'
),
'website_order_line': fields.one2many(
'sale.order.line', 'order_id',
string='Order Lines displayed on Website', readonly=True,
domain=[('is_delivery', '=', False)],
            help='Order Lines to be displayed on the website. They should not be used for computation purposes.',
),
}
def _check_carrier_quotation(self, cr, uid, order, force_carrier_id=None, context=None):
carrier_obj = self.pool.get('delivery.carrier')
# check to add or remove carrier_id
if not order:
return False
if all(line.product_id.type == "service" for line in order.website_order_line):
order.write({'carrier_id': None})
self.pool['sale.order']._delivery_unset(cr, SUPERUSER_ID, [order.id], context=context)
return True
else:
carrier_id = force_carrier_id or order.carrier_id.id
carrier_ids = self._get_delivery_methods(cr, uid, order, context=context)
if carrier_id:
if carrier_id not in carrier_ids:
carrier_id = False
else:
carrier_ids.remove(carrier_id)
carrier_ids.insert(0, carrier_id)
            if force_carrier_id or not carrier_id or carrier_id not in carrier_ids:
for delivery_id in carrier_ids:
grid_id = carrier_obj.grid_get(cr, SUPERUSER_ID, [delivery_id], order.partner_shipping_id.id)
if grid_id:
carrier_id = delivery_id
break
order.write({'carrier_id': carrier_id})
if carrier_id:
order.delivery_set()
else:
order._delivery_unset()
return bool(carrier_id)
def _get_delivery_methods(self, cr, uid, order, context=None):
carrier_obj = self.pool.get('delivery.carrier')
delivery_ids = carrier_obj.search(cr, uid, [('website_published','=',True)], context=context)
        # The following loop avoids displaying delivery methods that are not
        # available for this order. This can surely be done more efficiently,
        # but for now it mimics the way it is done in the delivery_set method
        # of sale.py, from the delivery module.
for delivery_id in carrier_obj.browse(cr, SUPERUSER_ID, delivery_ids, context=dict(context, order_id=order.id)):
if not delivery_id.available:
delivery_ids.remove(delivery_id.id)
return delivery_ids
def _get_errors(self, cr, uid, order, context=None):
errors = super(SaleOrder, self)._get_errors(cr, uid, order, context=context)
if not self._get_delivery_methods(cr, uid, order, context=context):
errors.append(
(_('Sorry, we are unable to ship your order'),
_('No shipping method is available for your current order and shipping address. '
'Please contact us for more information.')))
return errors
def _get_website_data(self, cr, uid, order, context=None):
""" Override to add delivery-related website data. """
values = super(SaleOrder, self)._get_website_data(cr, uid, order, context=context)
# We need a delivery only if we have stockable products
has_stockable_products = False
for line in order.order_line:
if line.product_id.type in ('consu', 'product'):
has_stockable_products = True
if not has_stockable_products:
return values
delivery_ctx = dict(context, order_id=order.id)
DeliveryCarrier = self.pool.get('delivery.carrier')
delivery_ids = self._get_delivery_methods(cr, uid, order, context=context)
values['deliveries'] = DeliveryCarrier.browse(cr, SUPERUSER_ID, delivery_ids, context=delivery_ctx)
return values
def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
""" Override to update carrier quotation if quantity changed """
values = super(SaleOrder, self)._cart_update(
cr, uid, ids, product_id, line_id, add_qty, set_qty, context, **kwargs)
if add_qty or set_qty is not None:
for sale_order in self.browse(cr, uid, ids, context=context):
self._check_carrier_quotation(cr, uid, sale_order, context=context)
return values
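# --- Hedged illustration (not part of the original module) ------------------
# An ORM-free sketch of what the _amount_all override above computes for
# amount_delivery: the sum of the subtotals of the order lines flagged as
# delivery lines (currency rounding omitted here).  The dicts are made-up
# stand-ins for sale.order.line browse records.
def _example_delivery_amount(order_lines):
    return sum(line['price_subtotal'] for line in order_lines
               if line['is_delivery'])
assert _example_delivery_amount([
    {'price_subtotal': 100.0, 'is_delivery': False},
    {'price_subtotal': 7.5, 'is_delivery': True},
]) == 7.5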
|
snazy2000/netbox
|
refs/heads/develop
|
netbox/dcim/migrations/0021_add_ff_flexstack.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-31 18:47
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dcim', '0020_rack_desc_units'),
]
operations = [
migrations.AlterField(
model_name='device',
name='position',
field=models.PositiveSmallIntegerField(blank=True, help_text=b'The lowest-numbered unit occupied by the device', null=True, validators=[django.core.validators.MinValueValidator(1)], verbose_name=b'Position (U)'),
),
migrations.AlterField(
model_name='interface',
name='form_factor',
field=models.PositiveSmallIntegerField(choices=[[b'Virtual interfaces', [[0, b'Virtual']]], [b'Ethernet (fixed)', [[800, b'100BASE-TX (10/100ME)'], [1000, b'1000BASE-T (1GE)'], [1150, b'10GBASE-T (10GE)']]], [b'Ethernet (modular)', [[1050, b'GBIC (1GE)'], [1100, b'SFP (1GE)'], [1200, b'SFP+ (10GE)'], [1300, b'XFP (10GE)'], [1310, b'XENPAK (10GE)'], [1320, b'X2 (10GE)'], [1350, b'SFP28 (25GE)'], [1400, b'QSFP+ (40GE)'], [1500, b'CFP (100GE)'], [1600, b'QSFP28 (100GE)']]], [b'FibreChannel', [[3010, b'SFP (1GFC)'], [3020, b'SFP (2GFC)'], [3040, b'SFP (4GFC)'], [3080, b'SFP+ (8GFC)'], [3160, b'SFP+ (16GFC)']]], [b'Serial', [[4000, b'T1 (1.544 Mbps)'], [4010, b'E1 (2.048 Mbps)'], [4040, b'T3 (45 Mbps)'], [4050, b'E3 (34 Mbps)']]], [b'Stacking', [[5000, b'Cisco StackWise'], [5050, b'Cisco StackWise Plus'], [5100, b'Cisco FlexStack'], [5150, b'Cisco FlexStack Plus']]], [b'Other', [[32767, b'Other']]]], default=1200),
),
migrations.AlterField(
model_name='interfacetemplate',
name='form_factor',
field=models.PositiveSmallIntegerField(choices=[[b'Virtual interfaces', [[0, b'Virtual']]], [b'Ethernet (fixed)', [[800, b'100BASE-TX (10/100ME)'], [1000, b'1000BASE-T (1GE)'], [1150, b'10GBASE-T (10GE)']]], [b'Ethernet (modular)', [[1050, b'GBIC (1GE)'], [1100, b'SFP (1GE)'], [1200, b'SFP+ (10GE)'], [1300, b'XFP (10GE)'], [1310, b'XENPAK (10GE)'], [1320, b'X2 (10GE)'], [1350, b'SFP28 (25GE)'], [1400, b'QSFP+ (40GE)'], [1500, b'CFP (100GE)'], [1600, b'QSFP28 (100GE)']]], [b'FibreChannel', [[3010, b'SFP (1GFC)'], [3020, b'SFP (2GFC)'], [3040, b'SFP (4GFC)'], [3080, b'SFP+ (8GFC)'], [3160, b'SFP+ (16GFC)']]], [b'Serial', [[4000, b'T1 (1.544 Mbps)'], [4010, b'E1 (2.048 Mbps)'], [4040, b'T3 (45 Mbps)'], [4050, b'E3 (34 Mbps)']]], [b'Stacking', [[5000, b'Cisco StackWise'], [5050, b'Cisco StackWise Plus'], [5100, b'Cisco FlexStack'], [5150, b'Cisco FlexStack Plus']]], [b'Other', [[32767, b'Other']]]], default=1200),
),
]
|
google-code-export/evennia
|
refs/heads/master
|
src/server/portal/msdp.py
|
2
|
"""
MSDP (Mud Server Data Protocol)
This implements the MSDP protocol as per
http://tintin.sourceforge.net/msdp/. MSDP manages out-of-band
communication between the client and server, for updating health bars
etc.
"""
import re
from src.utils.utils import to_str
# MSDP-relevant telnet cmd/opt-codes
MSDP = chr(69)
MSDP_VAR = chr(1)
MSDP_VAL = chr(2)
MSDP_TABLE_OPEN = chr(3)
MSDP_TABLE_CLOSE = chr(4)
MSDP_ARRAY_OPEN = chr(5)
MSDP_ARRAY_CLOSE = chr(6)
IAC = chr(255)
SB = chr(250)
SE = chr(240)
force_str = lambda inp: to_str(inp, force_string=True)
# pre-compiled regexes
# returns 2-tuple
regex_array = re.compile(r"%s(.*?)%s%s(.*?)%s" % (MSDP_VAR, MSDP_VAL,
MSDP_ARRAY_OPEN,
MSDP_ARRAY_CLOSE))
# returns 2-tuple (may be nested)
regex_table = re.compile(r"%s(.*?)%s%s(.*?)%s" % (MSDP_VAR, MSDP_VAL,
MSDP_TABLE_OPEN,
MSDP_TABLE_CLOSE))
regex_var = re.compile(MSDP_VAR)
regex_val = re.compile(MSDP_VAL)
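# Hedged illustration (not part of the original module): how a single MSDP
# variable is framed on the wire, using only the constants above.  The name
# "HEALTH" and value "100" are made-up examples, not MSDP defaults.  The
# outer framing matches what Msdp.data_out() below produces:
#     IAC SB MSDP  MSDP_VAR "HEALTH" MSDP_VAL "100"  IAC SE
_example_msdp_frame = (IAC + SB + MSDP +
                       MSDP_VAR + "HEALTH" + MSDP_VAL + "100" +
                       IAC + SE)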
# Msdp object handler
class Msdp(object):
"""
Implements the MSDP protocol.
"""
def __init__(self, protocol):
"""
Initiates by storing the protocol
on itself and trying to determine
if the client supports MSDP.
"""
self.protocol = protocol
self.protocol.protocol_flags['MSDP'] = False
self.protocol.negotiationMap[MSDP] = self.msdp_to_evennia
self.protocol.will(MSDP).addCallbacks(self.do_msdp, self.no_msdp)
self.msdp_reported = {}
def no_msdp(self, option):
"No msdp supported or wanted"
pass
def do_msdp(self, option):
"""
Called when client confirms that it can do MSDP.
"""
self.protocol.protocol_flags['MSDP'] = True
def evennia_to_msdp(self, cmdname, *args, **kwargs):
"""
handle return data from cmdname by converting it to
a proper msdp structure. data can either be a single value (will be
converted to a string), a list (will be converted to an MSDP_ARRAY),
or a dictionary (will be converted to MSDP_TABLE).
OBS - there is no actual use of arrays and tables in the MSDP
        specification or default commands -- all returns are implemented
        as simple lists or named lists (our name for them here; these
        un-bounded structures are not named in the specification). So for
        now, this routine will not explicitly create arrays or tables,
        although there are helper methods ready should they be needed in
the future.
"""
def make_table(name, **kwargs):
"build a table that may be nested with other tables or arrays."
string = MSDP_VAR + force_str(name) + MSDP_VAL + MSDP_TABLE_OPEN
for key, val in kwargs.items():
if isinstance(val, dict):
                    string += make_table(key, **val)
                elif hasattr(val, '__iter__'):
                    string += make_array(key, *val)
else:
string += MSDP_VAR + force_str(key) + MSDP_VAL + force_str(val)
string += MSDP_TABLE_CLOSE
return string
def make_array(name, *args):
"build a array. Arrays may not nest tables by definition."
string = MSDP_VAR + force_str(name) + MSDP_ARRAY_OPEN
string += MSDP_VAL.join(force_str(arg) for arg in args)
string += MSDP_ARRAY_CLOSE
return string
def make_list(name, *args):
"build a simple list - an array without start/end markers"
string = MSDP_VAR + force_str(name)
string += MSDP_VAL.join(force_str(arg) for arg in args)
return string
def make_named_list(name, **kwargs):
"build a named list - a table without start/end markers"
string = MSDP_VAR + force_str(name)
for key, val in kwargs.items():
string += MSDP_VAR + force_str(key) + MSDP_VAL + force_str(val)
return string
# Default MSDP commands
print "MSDP outgoing:", cmdname, args, kwargs
cupper = cmdname.upper()
if cupper == "LIST":
if args:
args = list(args)
mode = args.pop(0).upper()
self.data_out(make_array(mode, *args))
elif cupper == "REPORT":
self.data_out(make_list("REPORT", *args))
elif cupper == "UNREPORT":
self.data_out(make_list("UNREPORT", *args))
elif cupper == "RESET":
self.data_out(make_list("RESET", *args))
elif cupper == "SEND":
self.data_out(make_named_list("SEND", **kwargs))
else:
# return list or named lists.
msdp_string = ""
if args:
msdp_string += make_list(cupper, *args)
if kwargs:
msdp_string += make_named_list(cupper, **kwargs)
self.data_out(msdp_string)
def msdp_to_evennia(self, data):
"""
Handle a client's requested negotiation, converting
it into a function mapping - either one of the MSDP
default functions (LIST, SEND etc) or a custom one
        in the OOB_FUNCS dictionary. Command names are case-insensitive.
varname, var --> mapped to function varname(var)
arrayname, array --> mapped to function arrayname(*array)
tablename, table --> mapped to function tablename(**table)
        Note: Combinations of args/kwargs to one function are not supported
        in this implementation (it complicates the code for limited
        gain) - arrayname(*array) is usually as complex as anyone should
        ever need to go anyway (I hope!).
"""
tables = {}
arrays = {}
variables = {}
if hasattr(data, "__iter__"):
data = "".join(data)
#logger.log_infomsg("MSDP SUBNEGOTIATION: %s" % data)
for key, table in regex_table.findall(data):
tables[key] = {}
for varval in regex_var.split(table):
parts = regex_val.split(varval)
                tables[key].update({parts[0]: tuple(parts[1:]) if len(parts) > 1 else ("",)})
for key, array in regex_array.findall(data):
arrays[key] = []
for val in regex_val.split(array):
arrays[key].append(val)
arrays[key] = tuple(arrays[key])
for varval in regex_var.split(regex_array.sub("", regex_table.sub("", data))):
# get remaining varvals after cleaning away tables/arrays
parts = regex_val.split(varval)
variables[parts[0].upper()] = tuple(parts[1:]) if len(parts) > 1 else ("", )
#print "MSDP: table, array, variables:", tables, arrays, variables
# all variables sent through msdp to Evennia are considered commands
# with arguments. There are three forms of commands possible
# through msdp:
#
# VARNAME VAR -> varname(var)
# ARRAYNAME VAR VAL VAR VAL VAR VAL ENDARRAY -> arrayname(val,val,val)
# TABLENAME TABLE VARNAME VAL VARNAME VAL ENDTABLE ->
# tablename(varname=val, varname=val)
#
# default MSDP functions
if "LIST" in variables:
self.data_in("list", *variables.pop("LIST"))
if "REPORT" in variables:
self.data_in("report", *variables.pop("REPORT"))
if "REPORT" in arrays:
self.data_in("report", *(arrays.pop("REPORT")))
if "UNREPORT" in variables:
self.data_in("unreport", *(arrays.pop("UNREPORT")))
if "RESET" in variables:
self.data_in("reset", *variables.pop("RESET"))
if "RESET" in arrays:
self.data_in("reset", *(arrays.pop("RESET")))
if "SEND" in variables:
self.data_in("send", *variables.pop("SEND"))
if "SEND" in arrays:
self.data_in("send", *(arrays.pop("SEND")))
        # if anything is left, consider it a call to a custom function
for varname, var in variables.items():
# a simple function + argument
self.data_in(varname, (var,))
for arrayname, array in arrays.items():
            # we assume the array holds multiple arguments to the function
self.data_in(arrayname, *array)
for tablename, table in tables.items():
# we assume tables are keyword arguments to the function
self.data_in(tablename, **table)
def data_out(self, msdp_string):
"""
        Send an msdp-valid subnegotiation across the protocol.
"""
#print "msdp data_out (without IAC SE):", msdp_string
        self.protocol._write(IAC + SB + MSDP + force_str(msdp_string) + IAC + SE)
def data_in(self, funcname, *args, **kwargs):
"""
Send oob data to Evennia
"""
#print "msdp data_in:", funcname, args, kwargs
self.protocol.data_in(text=None, oob=(funcname, args, kwargs))
|
alvarofe/cassle
|
refs/heads/master
|
tls/tls_stream.py
|
1
|
# Copyright (C) 2014 Alvaro Felipe Melchor (alvaro.felipe91@gmail.com)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from tls import tls_types
import struct
from tls.tls_verification import TLSVerificationDispatch
class TLSStream():
"""
Manage a TLSStream. Basically split tls_records and process each of them
to retrieve handshake messages to validate after through
TLSVerificationDispatch.
"""
def __init__(self, raw_data):
self.cert_chain = None
self.status_request = None
self._process_tcp_stream(raw_data)
self._split_tls_record()
self._decode_tls_record()
self._dispatch()
def _dispatch(self):
data = {
"cert": None,
"status_request": None
}
if self.cert_chain:
data["cert"] = self.cert_chain
if self.status_request:
data["status_request"] = self.status_request
TLSVerificationDispatch(data)
def _split_tls_record(self):
"""Split the TLS stream into TLS records"""
data = self._stream
self._record = []
try:
while True:
(
content_type,
version_major,
version_minor,
length
) = struct.unpack_from("!BBBH", data, 0)
self._record.append(data[5:5+length])
data = data[5+length:]
        except struct.error:
            # We reached the end of the stream
            pass
def _process_tcp_stream(self, raw_data):
"""
        Assembles TCP streams so they can be processed later.
"""
        # raw_data is actually the stream collected by the assembler
        # function in util.py. Since the data was saved in a dictionary
        # keyed by sequence number, we only have to sort by sequence
        # number and join each packet.
aux = raw_data
self._stream = str()
        # sorted() because the segments must be ordered by
        # sequence number
for key in sorted(aux):
self._stream += aux[key]
def _decode_tls_record(self):
"""
        Extract certificate messages from the TLS records.
"""
        # You could augment this method to extract more messages and add more
        # logic to the application. Sometimes a certificate follows a server
        # hello inside the same TLS record, without the need for a new one.
for record in self._record:
try:
# content_type = struct.unpack_from("!B",record,0)[0]
(
message_type,
message_length
) = struct.unpack_from("!BI", record, 0)
message_length >>= 8
if message_type == tls_types.TLS_H_TYPE_SERVER_HELLO:
data = record[4+message_length:]
_type, _length = struct.unpack_from("!BI", data, 0)
_length >>= 8
if _type == tls_types.TLS_H_TYPE_CERTIFICATE:
self._decode_certificate(data[4:4+_length])
if message_type == tls_types.TLS_H_TYPE_CERTIFICATE:
self._decode_certificate(record[4:4+message_length])
except:
                # We are dealing with a record that is incomplete or corrupted
pass
def _decode_certificate(self, cert_handshake):
data = cert_handshake
self.cert_chain = []
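        # TLS handshake lengths are 24-bit big-endian: unpacking 4 bytes with
        # "!I" and shifting right by 8 recovers the 3-byte value, which is why
        # the slices below advance by 3 (or 3 + length) bytes.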
cert_length = struct.unpack_from("!I", data, 0)[0] >> 8
data = data[3:]
total_length = 0
while total_length != cert_length:
length = struct.unpack_from("!I", data, 0)[0] >> 8
total_length += length + 3
self.cert_chain.append(data[3:3+length])
data = data[3+length:]
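# --- Hedged illustration (not part of the original module) ------------------
# The 5-byte TLS record header that _split_tls_record unpacks with "!BBBH":
# content type (22 = handshake), protocol version (3, 3 = TLS 1.2) and a
# big-endian 16-bit payload length.  The values are made up for illustration.
if __name__ == '__main__':
    _header = struct.pack("!BBBH", 22, 3, 3, 4)
    assert struct.unpack_from("!BBBH", _header, 0) == (22, 3, 3, 4)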
|
AnselCmy/ARPS
|
refs/heads/master
|
report_spider/report_spider/spiders/USTC002.py
|
3
|
# -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import time
import scrapy
from Global_function import get_localtime, print_new_number, save_messages
now_time = get_localtime(time.strftime("%Y-%m-%d", time.localtime()))
# now_time = 20170410
class USTC002_Spider(scrapy.Spider):
name = 'USTC002'
start_urls = ['http://ess.ustc.edu.cn/notice']
domain = 'http://ess.ustc.edu.cn/'
counts = 0
def parse(self, response):
messages = response.xpath("//div[@class='view-content']/table/tbody/tr")
print_new_number(self.counts, 'USTC', self.name)
sign = 0
for i in xrange(len(messages)):
message = messages[i].xpath(".//td")
report_url = self.domain + message[0].xpath(".//a/@href").extract()[0][1:]
report_class = message[1].xpath(".//text()").extract()[0].strip()
report_time = get_localtime(message[2].xpath(".//text()").extract()[0].strip())
if u'学术报告' not in report_class:
continue
if report_time < now_time:
sign = 1
continue
yield scrapy.Request(report_url, callback=self.parse_pages, meta={'link': report_url, 'number': i + 1})
def parse_pages(self, response):
messages = response.xpath("//div[@class='inside panels-flexible-region-inside panels-flexible-region-jcjz-center-inside panels-flexible-region-inside-first']/div")
# address
address = self.get_message(messages[0])
# time
time = self.get_message(messages[1])
# speaker
speaker = self.get_message(messages[2])
# other message: title, person_introduce, content
img_url = ''
person_introduce = ''
title = ''
content = ''
for i in range(3, len(messages)):
part_name = messages[i].xpath(".//div[@class='field-label']/text()").extract()[0]
img_exist = messages[i].xpath(".//img")
if len(img_exist) != 0:
img_url = self.get_img(messages[i])
if u'报告人简介' in part_name:
person_introduce = self.get_message(messages[i])
elif u'题目' in part_name:
title = self.get_message(messages[i])
else:
content = self.get_message(messages[i])
# break
if title == '':
title = response.xpath("//h1/text()").extract()[0]
if img_url != '':
img_url = self.domain + img_url[1:]
if title != '':
self.counts += 1
print_new_number(self.counts, 'USTC', self.name)
all_messages = save_messages('USTC', self.name, title, time, address, speaker, person_introduce,
content, img_url, response.meta['link'], response.meta['number'], u'中国科学技术大学', u'地球和空间科学学院')
return all_messages
def get_message(self, messages):
text_list = messages.xpath(".//div[@class='field-items']").xpath(".//text()").extract()
texts = ''
for text in text_list:
texts += text.strip()
return texts
def get_img(self, messages):
img_url = messages.xpath(".//img/@src").extract()[0]
return img_url
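# --- Hedged illustration (not part of the original spider) ------------------
# Judging from the commented value above (now_time = 20170410), get_localtime
# appears to turn a "YYYY-MM-DD" date into a sortable integer.  The helper
# below is only a guessed stand-in for illustration, not the actual
# Global_function implementation.
def _example_localtime(date_string):
    return int(date_string.replace('-', ''))
assert _example_localtime('2017-04-10') == 20170410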
|
thehajime/ns-3-dev
|
refs/heads/master
|
src/core/bindings/modulegen__gcc_ILP32.py
|
5
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
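# Returning True from handle_error() tells pybindgen that the error has been
# handled, so the offending wrapper is skipped (with the warning above) and
# generation of the rest of the module continues.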
import sys
def module_init():
root_module = Module('ns.core', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## log.h (module 'core'): ns3::LogLevel [enumeration]
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', outer_class=root_module['ns3::AttributeConstructionList'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase')
## command-line.h (module 'core'): ns3::CommandLine [class]
module.add_class('CommandLine', allow_subclassing=True)
## system-mutex.h (module 'core'): ns3::CriticalSection [class]
module.add_class('CriticalSection')
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector [class]
module.add_class('EventGarbageCollector')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId')
## global-value.h (module 'core'): ns3::GlobalValue [class]
module.add_class('GlobalValue')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher')
## int-to-type.h (module 'core'): ns3::IntToType<0> [struct]
module.add_class('IntToType', template_parameters=['0'])
## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'])
## int-to-type.h (module 'core'): ns3::IntToType<1> [struct]
module.add_class('IntToType', template_parameters=['1'])
## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'])
## int-to-type.h (module 'core'): ns3::IntToType<2> [struct]
module.add_class('IntToType', template_parameters=['2'])
## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'])
## int-to-type.h (module 'core'): ns3::IntToType<3> [struct]
module.add_class('IntToType', template_parameters=['3'])
## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'])
## int-to-type.h (module 'core'): ns3::IntToType<4> [struct]
module.add_class('IntToType', template_parameters=['4'])
## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'])
## int-to-type.h (module 'core'): ns3::IntToType<5> [struct]
module.add_class('IntToType', template_parameters=['5'])
## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'])
## int-to-type.h (module 'core'): ns3::IntToType<6> [struct]
module.add_class('IntToType', template_parameters=['6'])
## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'])
## log.h (module 'core'): ns3::LogComponent [class]
module.add_class('LogComponent')
## names.h (module 'core'): ns3::Names [class]
module.add_class('Names')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True)
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory')
## random-variable.h (module 'core'): ns3::RandomVariable [class]
module.add_class('RandomVariable')
## random-variable-stream-helper.h (module 'core'): ns3::RandomVariableStreamHelper [class]
module.add_class('RandomVariableStreamHelper')
## rng-seed-manager.h (module 'core'): ns3::RngSeedManager [class]
module.add_class('RngSeedManager')
## rng-stream.h (module 'core'): ns3::RngStream [class]
module.add_class('RngStream')
## random-variable.h (module 'core'): ns3::SequentialVariable [class]
module.add_class('SequentialVariable', parent=root_module['ns3::RandomVariable'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private')
## system-condition.h (module 'core'): ns3::SystemCondition [class]
module.add_class('SystemCondition')
## system-mutex.h (module 'core'): ns3::SystemMutex [class]
module.add_class('SystemMutex')
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs [class]
module.add_class('SystemWallClockMs')
## timer.h (module 'core'): ns3::Timer [class]
module.add_class('Timer')
## timer.h (module 'core'): ns3::Timer::DestroyPolicy [enumeration]
module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'])
## timer.h (module 'core'): ns3::Timer::State [enumeration]
module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'])
## timer-impl.h (module 'core'): ns3::TimerImpl [class]
module.add_class('TimerImpl', allow_subclassing=True)
## random-variable.h (module 'core'): ns3::TriangularVariable [class]
module.add_class('TriangularVariable', parent=root_module['ns3::RandomVariable'])
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', outer_class=root_module['ns3::TypeId'])
## random-variable.h (module 'core'): ns3::UniformVariable [class]
module.add_class('UniformVariable', parent=root_module['ns3::RandomVariable'])
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D')
## watchdog.h (module 'core'): ns3::Watchdog [class]
module.add_class('Watchdog')
## random-variable.h (module 'core'): ns3::WeibullVariable [class]
module.add_class('WeibullVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::ZetaVariable [class]
module.add_class('ZetaVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::ZipfVariable [class]
module.add_class('ZipfVariable', parent=root_module['ns3::RandomVariable'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t')
## random-variable.h (module 'core'): ns3::ConstantVariable [class]
module.add_class('ConstantVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::DeterministicVariable [class]
module.add_class('DeterministicVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::EmpiricalVariable [class]
module.add_class('EmpiricalVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::ErlangVariable [class]
module.add_class('ErlangVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::ExponentialVariable [class]
module.add_class('ExponentialVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::GammaVariable [class]
module.add_class('GammaVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::IntEmpiricalVariable [class]
module.add_class('IntEmpiricalVariable', parent=root_module['ns3::EmpiricalVariable'])
## random-variable.h (module 'core'): ns3::LogNormalVariable [class]
module.add_class('LogNormalVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h (module 'core'): ns3::NormalVariable [class]
module.add_class('NormalVariable', parent=root_module['ns3::RandomVariable'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', outer_class=root_module['ns3::Object'])
## random-variable.h (module 'core'): ns3::ParetoVariable [class]
module.add_class('ParetoVariable', parent=root_module['ns3::RandomVariable'])
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
module.add_class('RandomVariableStream', parent=root_module['ns3::Object'])
## scheduler.h (module 'core'): ns3::Scheduler [class]
module.add_class('Scheduler', parent=root_module['ns3::Object'])
## scheduler.h (module 'core'): ns3::Scheduler::Event [struct]
module.add_class('Event', outer_class=root_module['ns3::Scheduler'])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey [struct]
module.add_class('EventKey', outer_class=root_module['ns3::Scheduler'])
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
module.add_class('SequentialRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::FdReader', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FdReader>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::RefCountBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::RefCountBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator-impl.h (module 'core'): ns3::SimulatorImpl [class]
module.add_class('SimulatorImpl', parent=root_module['ns3::Object'])
## synchronizer.h (module 'core'): ns3::Synchronizer [class]
module.add_class('Synchronizer', parent=root_module['ns3::Object'])
## system-thread.h (module 'core'): ns3::SystemThread [class]
module.add_class('SystemThread', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'])
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
module.add_class('TriangularRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
module.add_class('UniformRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer [class]
module.add_class('WallClockSynchronizer', parent=root_module['ns3::Synchronizer'])
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
module.add_class('WeibullRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
module.add_class('ZetaRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
module.add_class('ZipfRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## boolean.h (module 'core'): ns3::BooleanChecker [class]
module.add_class('BooleanChecker', parent=root_module['ns3::AttributeChecker'])
## boolean.h (module 'core'): ns3::BooleanValue [class]
module.add_class('BooleanValue', parent=root_module['ns3::AttributeValue'])
## calendar-scheduler.h (module 'core'): ns3::CalendarScheduler [class]
module.add_class('CalendarScheduler', parent=root_module['ns3::Scheduler'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
module.add_class('ConstantRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## default-simulator-impl.h (module 'core'): ns3::DefaultSimulatorImpl [class]
module.add_class('DefaultSimulatorImpl', parent=root_module['ns3::SimulatorImpl'])
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
module.add_class('DeterministicRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## double.h (module 'core'): ns3::DoubleValue [class]
module.add_class('DoubleValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
module.add_class('EmpiricalRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', parent=root_module['ns3::AttributeValue'])
## enum.h (module 'core'): ns3::EnumChecker [class]
module.add_class('EnumChecker', parent=root_module['ns3::AttributeChecker'])
## enum.h (module 'core'): ns3::EnumValue [class]
module.add_class('EnumValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
module.add_class('ErlangRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
module.add_class('ExponentialRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## unix-fd-reader.h (module 'core'): ns3::FdReader [class]
module.add_class('FdReader', parent=root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
module.add_class('GammaRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## heap-scheduler.h (module 'core'): ns3::HeapScheduler [class]
module.add_class('HeapScheduler', parent=root_module['ns3::Scheduler'])
## integer.h (module 'core'): ns3::IntegerValue [class]
module.add_class('IntegerValue', parent=root_module['ns3::AttributeValue'])
## list-scheduler.h (module 'core'): ns3::ListScheduler [class]
module.add_class('ListScheduler', parent=root_module['ns3::Scheduler'])
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
module.add_class('LogNormalRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## map-scheduler.h (module 'core'): ns3::MapScheduler [class]
module.add_class('MapScheduler', parent=root_module['ns3::Scheduler'])
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
module.add_class('NormalRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', parent=root_module['ns3::AttributeValue'])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerAccessor [class]
module.add_class('ObjectPtrContainerAccessor', parent=root_module['ns3::AttributeAccessor'])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerChecker [class]
module.add_class('ObjectPtrContainerChecker', parent=root_module['ns3::AttributeChecker'])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue [class]
module.add_class('ObjectPtrContainerValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
module.add_class('ParetoRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## pointer.h (module 'core'): ns3::PointerChecker [class]
module.add_class('PointerChecker', parent=root_module['ns3::AttributeChecker'])
## pointer.h (module 'core'): ns3::PointerValue [class]
module.add_class('PointerValue', parent=root_module['ns3::AttributeValue'])
## random-variable.h (module 'core'): ns3::RandomVariableChecker [class]
module.add_class('RandomVariableChecker', parent=root_module['ns3::AttributeChecker'])
## random-variable.h (module 'core'): ns3::RandomVariableValue [class]
module.add_class('RandomVariableValue', parent=root_module['ns3::AttributeValue'])
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl [class]
module.add_class('RealtimeSimulatorImpl', parent=root_module['ns3::SimulatorImpl'])
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::SynchronizationMode [enumeration]
module.add_enum('SynchronizationMode', ['SYNC_BEST_EFFORT', 'SYNC_HARD_LIMIT'], outer_class=root_module['ns3::RealtimeSimulatorImpl'])
## ref-count-base.h (module 'core'): ns3::RefCountBase [class]
module.add_class('RefCountBase', parent=root_module['ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >'])
## string.h (module 'core'): ns3::StringChecker [class]
module.add_class('StringChecker', parent=root_module['ns3::AttributeChecker'])
## string.h (module 'core'): ns3::StringValue [class]
module.add_class('StringValue', parent=root_module['ns3::AttributeValue'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', parent=root_module['ns3::AttributeValue'])
## uinteger.h (module 'core'): ns3::UintegerValue [class]
module.add_class('UintegerValue', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', parent=root_module['ns3::AttributeValue'])
typehandlers.add_type_alias('ns3::Vector3D', 'ns3::Vector')
typehandlers.add_type_alias('ns3::Vector3D*', 'ns3::Vector*')
typehandlers.add_type_alias('ns3::Vector3D&', 'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias('ns3::ObjectPtrContainerValue', 'ns3::ObjectVectorValue')
typehandlers.add_type_alias('ns3::ObjectPtrContainerValue*', 'ns3::ObjectVectorValue*')
typehandlers.add_type_alias('ns3::ObjectPtrContainerValue&', 'ns3::ObjectVectorValue&')
module.add_typedef(root_module['ns3::ObjectPtrContainerValue'], 'ObjectVectorValue')
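    ## Each alias above is registered in three forms (value, pointer and
    ## reference), presumably so the typedef is resolved wherever it appears
    ## in the wrapped API signatures.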
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *', 'ns3::LogTimePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) **', 'ns3::LogTimePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *&', 'ns3::LogTimePrinter&')
typehandlers.add_type_alias('ns3::RngSeedManager', 'ns3::SeedManager')
typehandlers.add_type_alias('ns3::RngSeedManager*', 'ns3::SeedManager*')
typehandlers.add_type_alias('ns3::RngSeedManager&', 'ns3::SeedManager&')
module.add_typedef(root_module['ns3::RngSeedManager'], 'SeedManager')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *', 'ns3::LogNodePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) **', 'ns3::LogNodePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & ) *&', 'ns3::LogNodePrinter&')
typehandlers.add_type_alias('ns3::Vector3DValue', 'ns3::VectorValue')
typehandlers.add_type_alias('ns3::Vector3DValue*', 'ns3::VectorValue*')
typehandlers.add_type_alias('ns3::Vector3DValue&', 'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias('ns3::ObjectPtrContainerValue', 'ns3::ObjectMapValue')
typehandlers.add_type_alias('ns3::ObjectPtrContainerValue*', 'ns3::ObjectMapValue*')
typehandlers.add_type_alias('ns3::ObjectPtrContainerValue&', 'ns3::ObjectMapValue&')
module.add_typedef(root_module['ns3::ObjectPtrContainerValue'], 'ObjectMapValue')
typehandlers.add_type_alias('ns3::Vector3DChecker', 'ns3::VectorChecker')
typehandlers.add_type_alias('ns3::Vector3DChecker*', 'ns3::VectorChecker*')
typehandlers.add_type_alias('ns3::Vector3DChecker&', 'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
## Register a nested module for the namespace CommandLineHelper
nested_module = module.add_cpp_namespace('CommandLineHelper')
register_types_ns3_CommandLineHelper(nested_module)
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace SystemPath
nested_module = module.add_cpp_namespace('SystemPath')
register_types_ns3_SystemPath(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
def register_types_ns3_CommandLineHelper(module):
root_module = module.get_root()
def register_types_ns3_Config(module):
root_module = module.get_root()
## config.h (module 'core'): ns3::Config::MatchContainer [class]
module.add_class('MatchContainer')
module.add_container('std::vector< ns3::Ptr< ns3::Object > >', 'ns3::Ptr< ns3::Object >', container_type='vector')
module.add_container('std::vector< std::string >', 'std::string', container_type='vector')
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) *', 'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) **', 'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias('uint32_t ( * ) ( char const *, size_t ) *&', 'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) *', 'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) **', 'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias('uint64_t ( * ) ( char const *, size_t ) *&', 'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_SystemPath(module):
root_module = module.get_root()
module.add_container('std::list< std::string >', 'std::string', container_type='list')
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3CommandLine_methods(root_module, root_module['ns3::CommandLine'])
register_Ns3CriticalSection_methods(root_module, root_module['ns3::CriticalSection'])
register_Ns3EventGarbageCollector_methods(root_module, root_module['ns3::EventGarbageCollector'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3GlobalValue_methods(root_module, root_module['ns3::GlobalValue'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
register_Ns3Names_methods(root_module, root_module['ns3::Names'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3RandomVariable_methods(root_module, root_module['ns3::RandomVariable'])
register_Ns3RandomVariableStreamHelper_methods(root_module, root_module['ns3::RandomVariableStreamHelper'])
register_Ns3RngSeedManager_methods(root_module, root_module['ns3::RngSeedManager'])
register_Ns3RngStream_methods(root_module, root_module['ns3::RngStream'])
register_Ns3SequentialVariable_methods(root_module, root_module['ns3::SequentialVariable'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3SystemCondition_methods(root_module, root_module['ns3::SystemCondition'])
register_Ns3SystemMutex_methods(root_module, root_module['ns3::SystemMutex'])
register_Ns3SystemWallClockMs_methods(root_module, root_module['ns3::SystemWallClockMs'])
register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
register_Ns3TriangularVariable_methods(root_module, root_module['ns3::TriangularVariable'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3UniformVariable_methods(root_module, root_module['ns3::UniformVariable'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3Watchdog_methods(root_module, root_module['ns3::Watchdog'])
register_Ns3WeibullVariable_methods(root_module, root_module['ns3::WeibullVariable'])
register_Ns3ZetaVariable_methods(root_module, root_module['ns3::ZetaVariable'])
register_Ns3ZipfVariable_methods(root_module, root_module['ns3::ZipfVariable'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3ConstantVariable_methods(root_module, root_module['ns3::ConstantVariable'])
register_Ns3DeterministicVariable_methods(root_module, root_module['ns3::DeterministicVariable'])
register_Ns3EmpiricalVariable_methods(root_module, root_module['ns3::EmpiricalVariable'])
register_Ns3ErlangVariable_methods(root_module, root_module['ns3::ErlangVariable'])
register_Ns3ExponentialVariable_methods(root_module, root_module['ns3::ExponentialVariable'])
register_Ns3GammaVariable_methods(root_module, root_module['ns3::GammaVariable'])
register_Ns3IntEmpiricalVariable_methods(root_module, root_module['ns3::IntEmpiricalVariable'])
register_Ns3LogNormalVariable_methods(root_module, root_module['ns3::LogNormalVariable'])
register_Ns3NormalVariable_methods(root_module, root_module['ns3::NormalVariable'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3ParetoVariable_methods(root_module, root_module['ns3::ParetoVariable'])
register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
register_Ns3Scheduler_methods(root_module, root_module['ns3::Scheduler'])
register_Ns3SchedulerEvent_methods(root_module, root_module['ns3::Scheduler::Event'])
register_Ns3SchedulerEventKey_methods(root_module, root_module['ns3::Scheduler::EventKey'])
register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3RefCountBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3RefCountBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >'])
register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SimulatorImpl_methods(root_module, root_module['ns3::SimulatorImpl'])
register_Ns3Synchronizer_methods(root_module, root_module['ns3::Synchronizer'])
register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
register_Ns3WallClockSynchronizer_methods(root_module, root_module['ns3::WallClockSynchronizer'])
register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
register_Ns3CalendarScheduler_methods(root_module, root_module['ns3::CalendarScheduler'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
register_Ns3DefaultSimulatorImpl_methods(root_module, root_module['ns3::DefaultSimulatorImpl'])
register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
register_Ns3FdReader_methods(root_module, root_module['ns3::FdReader'])
register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
register_Ns3HeapScheduler_methods(root_module, root_module['ns3::HeapScheduler'])
register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
register_Ns3ListScheduler_methods(root_module, root_module['ns3::ListScheduler'])
register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
register_Ns3MapScheduler_methods(root_module, root_module['ns3::MapScheduler'])
register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3ObjectPtrContainerAccessor_methods(root_module, root_module['ns3::ObjectPtrContainerAccessor'])
register_Ns3ObjectPtrContainerChecker_methods(root_module, root_module['ns3::ObjectPtrContainerChecker'])
register_Ns3ObjectPtrContainerValue_methods(root_module, root_module['ns3::ObjectPtrContainerValue'])
register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
register_Ns3PointerChecker_methods(root_module, root_module['ns3::PointerChecker'])
register_Ns3PointerValue_methods(root_module, root_module['ns3::PointerValue'])
register_Ns3RandomVariableChecker_methods(root_module, root_module['ns3::RandomVariableChecker'])
register_Ns3RandomVariableValue_methods(root_module, root_module['ns3::RandomVariableValue'])
register_Ns3RealtimeSimulatorImpl_methods(root_module, root_module['ns3::RealtimeSimulatorImpl'])
register_Ns3RefCountBase_methods(root_module, root_module['ns3::RefCountBase'])
register_Ns3StringChecker_methods(root_module, root_module['ns3::StringChecker'])
register_Ns3StringValue_methods(root_module, root_module['ns3::StringValue'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3ConfigMatchContainer_methods(root_module, root_module['ns3::Config::MatchContainer'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
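# Each register_Ns3*_methods function below wires one C++ class from the core
# module into the bindings: constructors via cls.add_constructor(), member and
# static functions via cls.add_method(), and public data members via
# cls.add_instance_attribute(), with parameter types spelled out through param().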
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CommandLine_methods(root_module, cls):
## command-line.h (module 'core'): ns3::CommandLine::CommandLine() [constructor]
cls.add_constructor([])
## command-line.h (module 'core'): ns3::CommandLine::CommandLine(ns3::CommandLine const & cmd) [copy constructor]
cls.add_constructor([param('ns3::CommandLine const &', 'cmd')])
## command-line.h (module 'core'): void ns3::CommandLine::AddValue(std::string const & name, std::string const & help, ns3::Callback<bool, std::string, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddValue',
'void',
[param('std::string const &', 'name'), param('std::string const &', 'help'), param('ns3::Callback< bool, std::string, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## command-line.h (module 'core'): std::string ns3::CommandLine::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## command-line.h (module 'core'): void ns3::CommandLine::Parse(int argc, char * * argv) [member function]
cls.add_method('Parse',
'void',
[param('int', 'argc'), param('char * *', 'argv')])
## command-line.h (module 'core'): void ns3::CommandLine::Usage(std::string const usage) [member function]
cls.add_method('Usage',
'void',
[param('std::string const', 'usage')])
return
def register_Ns3CriticalSection_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::CriticalSection const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CriticalSection const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::SystemMutex & mutex) [constructor]
cls.add_constructor([param('ns3::SystemMutex &', 'mutex')])
return
def register_Ns3EventGarbageCollector_methods(root_module, cls):
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector::EventGarbageCollector(ns3::EventGarbageCollector const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')])
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector::EventGarbageCollector() [constructor]
cls.add_constructor([])
## event-garbage-collector.h (module 'core'): void ns3::EventGarbageCollector::Track(ns3::EventId event) [member function]
cls.add_method('Track',
'void',
[param('ns3::EventId', 'event')])
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
def register_Ns3GlobalValue_methods(root_module, cls):
## global-value.h (module 'core'): ns3::GlobalValue::GlobalValue(ns3::GlobalValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GlobalValue const &', 'arg0')])
## global-value.h (module 'core'): ns3::GlobalValue::GlobalValue(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeChecker const> checker) [constructor]
cls.add_constructor([param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## global-value.h (module 'core'): static __gnu_cxx::__normal_iterator<ns3::GlobalValue* const*,std::vector<ns3::GlobalValue*, std::allocator<ns3::GlobalValue*> > > ns3::GlobalValue::Begin() [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::GlobalValue * const *, std::vector< ns3::GlobalValue * > >',
[],
is_static=True)
## global-value.h (module 'core'): static void ns3::GlobalValue::Bind(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Bind',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
is_static=True)
## global-value.h (module 'core'): static bool ns3::GlobalValue::BindFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('BindFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
is_static=True)
## global-value.h (module 'core'): static __gnu_cxx::__normal_iterator<ns3::GlobalValue* const*,std::vector<ns3::GlobalValue*, std::allocator<ns3::GlobalValue*> > > ns3::GlobalValue::End() [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::GlobalValue * const *, std::vector< ns3::GlobalValue * > >',
[],
is_static=True)
## global-value.h (module 'core'): ns3::Ptr<ns3::AttributeChecker const> ns3::GlobalValue::GetChecker() const [member function]
cls.add_method('GetChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[],
is_const=True)
## global-value.h (module 'core'): std::string ns3::GlobalValue::GetHelp() const [member function]
cls.add_method('GetHelp',
'std::string',
[],
is_const=True)
## global-value.h (module 'core'): std::string ns3::GlobalValue::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## global-value.h (module 'core'): void ns3::GlobalValue::GetValue(ns3::AttributeValue & value) const [member function]
cls.add_method('GetValue',
'void',
[param('ns3::AttributeValue &', 'value')],
is_const=True)
## global-value.h (module 'core'): static void ns3::GlobalValue::GetValueByName(std::string name, ns3::AttributeValue & value) [member function]
cls.add_method('GetValueByName',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_static=True)
## global-value.h (module 'core'): static bool ns3::GlobalValue::GetValueByNameFailSafe(std::string name, ns3::AttributeValue & value) [member function]
cls.add_method('GetValueByNameFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_static=True)
## global-value.h (module 'core'): void ns3::GlobalValue::ResetInitialValue() [member function]
cls.add_method('ResetInitialValue',
'void',
[])
## global-value.h (module 'core'): bool ns3::GlobalValue::SetValue(ns3::AttributeValue const & value) [member function]
cls.add_method('SetValue',
'bool',
[param('ns3::AttributeValue const &', 'value')])
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
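# IntToType<0> through IntToType<6> are compile-time tag types; each
# instantiation exposes only its default and copy constructors.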
def register_Ns3IntToType__0_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType(ns3::IntToType<0> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')])
return
def register_Ns3IntToType__1_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType(ns3::IntToType<1> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')])
return
def register_Ns3IntToType__2_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType(ns3::IntToType<2> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')])
return
def register_Ns3IntToType__3_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType(ns3::IntToType<3> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')])
return
def register_Ns3IntToType__4_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType(ns3::IntToType<4> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')])
return
def register_Ns3IntToType__5_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType(ns3::IntToType<5> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')])
return
def register_Ns3IntToType__6_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType(ns3::IntToType<6> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')])
return
def register_Ns3LogComponent_methods(root_module, cls):
## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [copy constructor]
cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
## log.h (module 'core'): ns3::LogComponent::LogComponent(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel level) [member function]
cls.add_method('Disable',
'void',
[param('ns3::LogLevel', 'level')])
## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel level) [member function]
cls.add_method('Enable',
'void',
[param('ns3::LogLevel', 'level')])
## log.h (module 'core'): void ns3::LogComponent::EnvVarCheck(char const * name) [member function]
cls.add_method('EnvVarCheck',
'void',
[param('char const *', 'name')])
## log.h (module 'core'): std::string ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) const [member function]
cls.add_method('GetLevelLabel',
'std::string',
[param('ns3::LogLevel const', 'level')],
is_const=True)
## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel level) const [member function]
cls.add_method('IsEnabled',
'bool',
[param('ns3::LogLevel', 'level')],
is_const=True)
## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function]
cls.add_method('IsNoneEnabled',
'bool',
[],
is_const=True)
## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function]
cls.add_method('Name',
'char const *',
[],
is_const=True)
return
def register_Ns3Names_methods(root_module, cls):
## names.h (module 'core'): ns3::Names::Names() [constructor]
cls.add_constructor([])
## names.h (module 'core'): ns3::Names::Names(ns3::Names const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Names const &', 'arg0')])
## names.h (module 'core'): static void ns3::Names::Add(std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Add(std::string path, std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'path'), param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Add(ns3::Ptr<ns3::Object> context, std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Object >', 'context'), param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_static=True)
## names.h (module 'core'): static std::string ns3::Names::FindName(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('FindName',
'std::string',
[param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static std::string ns3::Names::FindPath(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('FindPath',
'std::string',
[param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Rename(std::string oldpath, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('std::string', 'oldpath'), param('std::string', 'newname')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Rename(std::string path, std::string oldname, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('std::string', 'path'), param('std::string', 'oldname'), param('std::string', 'newname')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Rename(ns3::Ptr<ns3::Object> context, std::string oldname, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('ns3::Ptr< ns3::Object >', 'context'), param('std::string', 'oldname'), param('std::string', 'newname')],
is_static=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
def register_Ns3RandomVariable_methods(root_module, cls):
cls.add_output_stream_operator()
## random-variable.h (module 'core'): ns3::RandomVariable::RandomVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::RandomVariable::RandomVariable(ns3::RandomVariable const & o) [copy constructor]
cls.add_constructor([param('ns3::RandomVariable const &', 'o')])
## random-variable.h (module 'core'): uint32_t ns3::RandomVariable::GetInteger() const [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_const=True)
## random-variable.h (module 'core'): double ns3::RandomVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
return
def register_Ns3RandomVariableStreamHelper_methods(root_module, cls):
## random-variable-stream-helper.h (module 'core'): ns3::RandomVariableStreamHelper::RandomVariableStreamHelper() [constructor]
cls.add_constructor([])
## random-variable-stream-helper.h (module 'core'): ns3::RandomVariableStreamHelper::RandomVariableStreamHelper(ns3::RandomVariableStreamHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomVariableStreamHelper const &', 'arg0')])
## random-variable-stream-helper.h (module 'core'): static int64_t ns3::RandomVariableStreamHelper::AssignStreams(std::string path, int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('std::string', 'path'), param('int64_t', 'stream')],
is_static=True)
return
def register_Ns3RngSeedManager_methods(root_module, cls):
## rng-seed-manager.h (module 'core'): ns3::RngSeedManager::RngSeedManager() [constructor]
cls.add_constructor([])
## rng-seed-manager.h (module 'core'): ns3::RngSeedManager::RngSeedManager(ns3::RngSeedManager const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RngSeedManager const &', 'arg0')])
## rng-seed-manager.h (module 'core'): static uint64_t ns3::RngSeedManager::GetNextStreamIndex() [member function]
cls.add_method('GetNextStreamIndex',
'uint64_t',
[],
is_static=True)
## rng-seed-manager.h (module 'core'): static uint64_t ns3::RngSeedManager::GetRun() [member function]
cls.add_method('GetRun',
'uint64_t',
[],
is_static=True)
## rng-seed-manager.h (module 'core'): static uint32_t ns3::RngSeedManager::GetSeed() [member function]
cls.add_method('GetSeed',
'uint32_t',
[],
is_static=True)
## rng-seed-manager.h (module 'core'): static void ns3::RngSeedManager::SetRun(uint64_t run) [member function]
cls.add_method('SetRun',
'void',
[param('uint64_t', 'run')],
is_static=True)
## rng-seed-manager.h (module 'core'): static void ns3::RngSeedManager::SetSeed(uint32_t seed) [member function]
cls.add_method('SetSeed',
'void',
[param('uint32_t', 'seed')],
is_static=True)
return
def register_Ns3RngStream_methods(root_module, cls):
## rng-stream.h (module 'core'): ns3::RngStream::RngStream(uint32_t seed, uint64_t stream, uint64_t substream) [constructor]
cls.add_constructor([param('uint32_t', 'seed'), param('uint64_t', 'stream'), param('uint64_t', 'substream')])
## rng-stream.h (module 'core'): ns3::RngStream::RngStream(ns3::RngStream const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RngStream const &', 'arg0')])
## rng-stream.h (module 'core'): double ns3::RngStream::RandU01() [member function]
cls.add_method('RandU01',
'double',
[])
return
def register_Ns3SequentialVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(ns3::SequentialVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SequentialVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(double f, double l, double i=1, uint32_t c=1) [constructor]
cls.add_constructor([param('double', 'f'), param('double', 'l'), param('double', 'i', default_value='1'), param('uint32_t', 'c', default_value='1')])
## random-variable.h (module 'core'): ns3::SequentialVariable::SequentialVariable(double f, double l, ns3::RandomVariable const & i, uint32_t c=1) [constructor]
cls.add_constructor([param('double', 'f'), param('double', 'l'), param('ns3::RandomVariable const &', 'i'), param('uint32_t', 'c', default_value='1')])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_static=True)
return
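# Note that every ns3::Simulator member above is bound with is_static=True:
# the C++ class exposes only static functions and delegates to the active
# SimulatorImpl set via SetImplementation().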
def register_Ns3SystemCondition_methods(root_module, cls):
## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition(ns3::SystemCondition const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemCondition const &', 'arg0')])
## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition() [constructor]
cls.add_constructor([])
## system-condition.h (module 'core'): void ns3::SystemCondition::Broadcast() [member function]
cls.add_method('Broadcast',
'void',
[])
## system-condition.h (module 'core'): bool ns3::SystemCondition::GetCondition() [member function]
cls.add_method('GetCondition',
'bool',
[])
## system-condition.h (module 'core'): void ns3::SystemCondition::SetCondition(bool condition) [member function]
cls.add_method('SetCondition',
'void',
[param('bool', 'condition')])
## system-condition.h (module 'core'): void ns3::SystemCondition::Signal() [member function]
cls.add_method('Signal',
'void',
[])
## system-condition.h (module 'core'): bool ns3::SystemCondition::TimedWait(uint64_t ns) [member function]
cls.add_method('TimedWait',
'bool',
[param('uint64_t', 'ns')])
## system-condition.h (module 'core'): void ns3::SystemCondition::Wait() [member function]
cls.add_method('Wait',
'void',
[])
return
def register_Ns3SystemMutex_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex(ns3::SystemMutex const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemMutex const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex() [constructor]
cls.add_constructor([])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Lock() [member function]
cls.add_method('Lock',
'void',
[])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Unlock() [member function]
cls.add_method('Unlock',
'void',
[])
return
def register_Ns3SystemWallClockMs_methods(root_module, cls):
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs(ns3::SystemWallClockMs const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemWallClockMs const &', 'arg0')])
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs() [constructor]
cls.add_constructor([])
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::End() [member function]
cls.add_method('End',
'int64_t',
[])
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedReal() const [member function]
cls.add_method('GetElapsedReal',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedSystem() const [member function]
cls.add_method('GetElapsedSystem',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedUser() const [member function]
cls.add_method('GetElapsedUser',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): void ns3::SystemWallClockMs::Start() [member function]
cls.add_method('Start',
'void',
[])
return
def register_Ns3Timer_methods(root_module, cls):
## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Timer const &', 'arg0')])
## timer.h (module 'core'): ns3::Timer::Timer() [constructor]
cls.add_constructor([])
## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer::DestroyPolicy destroyPolicy) [constructor]
cls.add_constructor([param('ns3::Timer::DestroyPolicy', 'destroyPolicy')])
## timer.h (module 'core'): void ns3::Timer::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelay() const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[],
is_const=True)
## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelayLeft() const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[],
is_const=True)
## timer.h (module 'core'): ns3::Timer::State ns3::Timer::GetState() const [member function]
cls.add_method('GetState',
'ns3::Timer::State',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsSuspended() const [member function]
cls.add_method('IsSuspended',
'bool',
[],
is_const=True)
## timer.h (module 'core'): void ns3::Timer::Remove() [member function]
cls.add_method('Remove',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Resume() [member function]
cls.add_method('Resume',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Schedule() [member function]
cls.add_method('Schedule',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Schedule(ns3::Time delay) [member function]
cls.add_method('Schedule',
'void',
[param('ns3::Time', 'delay')])
## timer.h (module 'core'): void ns3::Timer::SetDelay(ns3::Time const & delay) [member function]
cls.add_method('SetDelay',
'void',
[param('ns3::Time const &', 'delay')])
## timer.h (module 'core'): void ns3::Timer::Suspend() [member function]
cls.add_method('Suspend',
'void',
[])
return
def register_Ns3TimerImpl_methods(root_module, cls):
## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl() [constructor]
cls.add_constructor([])
## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl(ns3::TimerImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimerImpl const &', 'arg0')])
## timer-impl.h (module 'core'): void ns3::TimerImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## timer-impl.h (module 'core'): ns3::EventId ns3::TimerImpl::Schedule(ns3::Time const & delay) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'delay')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3TriangularVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable(ns3::TriangularVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TriangularVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::TriangularVariable::TriangularVariable(double s, double l, double mean) [constructor]
cls.add_constructor([param('double', 's'), param('double', 'l'), param('double', 'mean')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
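# The following helper is an illustrative sketch added by hand, not output of
# the bindings generator.  It shows how the TypeId methods registered above are
# typically exercised from Python once the extension has been built; the
# ``ns.core`` import path is an assumption about the usual ns-3 layout.
def _example_type_id_lookup():
    import ns.core as core  # assumed module name for the built 'core' bindings
    tid = core.TypeId.LookupByName('ns3::Object')
    # Enumerate the attributes advertised by this TypeId.
    names = [tid.GetAttributeFullName(i) for i in range(tid.GetAttributeN())]
    return tid.GetName(), names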
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3UniformVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable(ns3::UniformVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UniformVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::UniformVariable::UniformVariable(double s, double l) [constructor]
cls.add_constructor([param('double', 's'), param('double', 'l')])
## random-variable.h (module 'core'): uint32_t ns3::UniformVariable::GetInteger(uint32_t s, uint32_t l) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 's'), param('uint32_t', 'l')])
## random-variable.h (module 'core'): double ns3::UniformVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
## random-variable.h (module 'core'): double ns3::UniformVariable::GetValue(double s, double l) [member function]
cls.add_method('GetValue',
'double',
[param('double', 's'), param('double', 'l')])
return
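# Illustrative sketch (hand-written, not generator output): sampling from the
# UniformVariable wrapper registered above.  Assumes the bindings import as
# ``ns.core``.
def _example_uniform_variable():
    import ns.core as core  # assumed import path
    uv = core.UniformVariable(0.0, 10.0)   # bounds via the (s, l) constructor
    return uv.GetValue(), uv.GetInteger(0, 10)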
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
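# Illustrative sketch (hand-written, not generator output): Vector3D's x, y and
# z are bound as plain mutable attributes, so positions can be built and edited
# directly from Python.  Assumes the bindings import as ``ns.core``.
def _example_vector3d():
    import ns.core as core  # assumed import path
    v = core.Vector3D(1.0, 2.0, 3.0)
    v.z = 0.0                      # instance attributes are writable
    return (v.x, v.y, v.z)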
def register_Ns3Watchdog_methods(root_module, cls):
## watchdog.h (module 'core'): ns3::Watchdog::Watchdog(ns3::Watchdog const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Watchdog const &', 'arg0')])
## watchdog.h (module 'core'): ns3::Watchdog::Watchdog() [constructor]
cls.add_constructor([])
## watchdog.h (module 'core'): void ns3::Watchdog::Ping(ns3::Time delay) [member function]
cls.add_method('Ping',
'void',
[param('ns3::Time', 'delay')])
return
def register_Ns3WeibullVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(ns3::WeibullVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::WeibullVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m) [constructor]
cls.add_constructor([param('double', 'm')])
## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m, double s) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's')])
## random-variable.h (module 'core'): ns3::WeibullVariable::WeibullVariable(double m, double s, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')])
return
def register_Ns3ZetaVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable(ns3::ZetaVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ZetaVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable(double alpha) [constructor]
cls.add_constructor([param('double', 'alpha')])
## random-variable.h (module 'core'): ns3::ZetaVariable::ZetaVariable() [constructor]
cls.add_constructor([])
return
def register_Ns3ZipfVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable(ns3::ZipfVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ZipfVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable(long int N, double alpha) [constructor]
cls.add_constructor([param('long int', 'N'), param('double', 'alpha')])
## random-variable.h (module 'core'): ns3::ZipfVariable::ZipfVariable() [constructor]
cls.add_constructor([])
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
return
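# Illustrative sketch (hand-written, not generator output): the operator
# registrations above make int64x64_t behave like a number in Python.  Assumes
# the bindings import as ``ns.core``; the exposed class name is taken from the
# registration above.
def _example_int64x64_arithmetic():
    import ns.core as core  # assumed import path
    a = core.int64x64_t(1.5)        # double constructor
    b = core.int64x64_t(2, 0)       # (hi, lo) fixed-point constructor
    c = (a + b) * 2                 # uses the '+' and '*' operators bound above
    return c.GetDouble(), a < b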
def register_Ns3ConstantVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable(ns3::ConstantVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConstantVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::ConstantVariable::ConstantVariable(double c) [constructor]
cls.add_constructor([param('double', 'c')])
## random-variable.h (module 'core'): void ns3::ConstantVariable::SetConstant(double c) [member function]
cls.add_method('SetConstant',
'void',
[param('double', 'c')])
return
def register_Ns3DeterministicVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::DeterministicVariable::DeterministicVariable(ns3::DeterministicVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DeterministicVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::DeterministicVariable::DeterministicVariable(double * d, uint32_t c) [constructor]
cls.add_constructor([param('double *', 'd'), param('uint32_t', 'c')])
return
def register_Ns3EmpiricalVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::EmpiricalVariable::EmpiricalVariable(ns3::EmpiricalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmpiricalVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::EmpiricalVariable::EmpiricalVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): void ns3::EmpiricalVariable::CDF(double v, double c) [member function]
cls.add_method('CDF',
'void',
[param('double', 'v'), param('double', 'c')])
return
def register_Ns3ErlangVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable(ns3::ErlangVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ErlangVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::ErlangVariable::ErlangVariable(unsigned int k, double lambda) [constructor]
cls.add_constructor([param('unsigned int', 'k'), param('double', 'lambda')])
## random-variable.h (module 'core'): double ns3::ErlangVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
## random-variable.h (module 'core'): double ns3::ErlangVariable::GetValue(unsigned int k, double lambda) const [member function]
cls.add_method('GetValue',
'double',
[param('unsigned int', 'k'), param('double', 'lambda')],
is_const=True)
return
def register_Ns3ExponentialVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(ns3::ExponentialVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ExponentialVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(double m) [constructor]
cls.add_constructor([param('double', 'm')])
## random-variable.h (module 'core'): ns3::ExponentialVariable::ExponentialVariable(double m, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 'b')])
return
def register_Ns3GammaVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable(ns3::GammaVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GammaVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::GammaVariable::GammaVariable(double alpha, double beta) [constructor]
cls.add_constructor([param('double', 'alpha'), param('double', 'beta')])
## random-variable.h (module 'core'): double ns3::GammaVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
## random-variable.h (module 'core'): double ns3::GammaVariable::GetValue(double alpha, double beta) const [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha'), param('double', 'beta')],
is_const=True)
return
def register_Ns3IntEmpiricalVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::IntEmpiricalVariable::IntEmpiricalVariable(ns3::IntEmpiricalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntEmpiricalVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::IntEmpiricalVariable::IntEmpiricalVariable() [constructor]
cls.add_constructor([])
return
def register_Ns3LogNormalVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::LogNormalVariable::LogNormalVariable(ns3::LogNormalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::LogNormalVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::LogNormalVariable::LogNormalVariable(double mu, double sigma) [constructor]
cls.add_constructor([param('double', 'mu'), param('double', 'sigma')])
return
def register_Ns3NormalVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(ns3::NormalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NormalVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(double m, double v) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 'v')])
## random-variable.h (module 'core'): ns3::NormalVariable::NormalVariable(double m, double v, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 'v'), param('double', 'b')])
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject(ns3::TypeId tid) const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[param('ns3::TypeId', 'tid')],
is_const=True, template_parameters=['ns3::Object'], custom_template_method_name='GetObject')
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
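# Illustrative sketch (hand-written, not generator output): a minimal lifecycle
# for the Object wrapper registered above.  Assumes the bindings import as
# ``ns.core``; real code normally obtains Objects from factories rather than
# constructing them directly.
def _example_object_lifecycle():
    import ns.core as core  # assumed import path
    obj = core.Object()
    tid = obj.GetInstanceTypeId()   # bound above as a const virtual method
    obj.Dispose()
    return tid.GetName()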
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3ParetoVariable_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(ns3::ParetoVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ParetoVariable const &', 'arg0')])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m) [constructor]
cls.add_constructor([param('double', 'm')])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m, double s) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's')])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(double m, double s, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params) [constructor]
cls.add_constructor([param('std::pair< double, double >', 'params')])
## random-variable.h (module 'core'): ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params, double b) [constructor]
cls.add_constructor([param('std::pair< double, double >', 'params'), param('double', 'b')])
return
def register_Ns3RandomVariableStream_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function]
cls.add_method('SetStream',
'void',
[param('int64_t', 'stream')])
## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function]
cls.add_method('GetStream',
'int64_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function]
cls.add_method('SetAntithetic',
'void',
[param('bool', 'isAntithetic')])
## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function]
cls.add_method('IsAntithetic',
'bool',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function]
cls.add_method('Peek',
'ns3::RngStream *',
[],
is_const=True, visibility='protected')
return
def register_Ns3Scheduler_methods(root_module, cls):
## scheduler.h (module 'core'): ns3::Scheduler::Scheduler() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::Scheduler(ns3::Scheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Scheduler const &', 'arg0')])
## scheduler.h (module 'core'): static ns3::TypeId ns3::Scheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## scheduler.h (module 'core'): void ns3::Scheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## scheduler.h (module 'core'): bool ns3::Scheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## scheduler.h (module 'core'): ns3::Scheduler::Event ns3::Scheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## scheduler.h (module 'core'): void ns3::Scheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## scheduler.h (module 'core'): ns3::Scheduler::Event ns3::Scheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3SchedulerEvent_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
## scheduler.h (module 'core'): ns3::Scheduler::Event::Event() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::Event::Event(ns3::Scheduler::Event const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Scheduler::Event const &', 'arg0')])
## scheduler.h (module 'core'): ns3::Scheduler::Event::impl [variable]
cls.add_instance_attribute('impl', 'ns3::EventImpl *', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::Event::key [variable]
cls.add_instance_attribute('key', 'ns3::Scheduler::EventKey', is_const=False)
return
def register_Ns3SchedulerEventKey_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::EventKey() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::EventKey(ns3::Scheduler::EventKey const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Scheduler::EventKey const &', 'arg0')])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_context [variable]
cls.add_instance_attribute('m_context', 'uint32_t', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_ts [variable]
cls.add_instance_attribute('m_ts', 'uint64_t', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_uid [variable]
cls.add_instance_attribute('m_uid', 'uint32_t', is_const=False)
return
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function]
cls.add_method('GetIncrement',
'ns3::Ptr< ns3::RandomVariableStream >',
[],
is_const=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function]
cls.add_method('GetConsecutive',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
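# Illustrative sketch (hand-written, not generator output): a concrete
# RandomVariableStream subclass picks up the stream-selection helpers bound on
# the base class above.  Assumes the bindings import as ``ns.core``; ns-3 code
# would normally create the object through CreateObject and configure it via
# attributes instead.
def _example_random_variable_stream():
    import ns.core as core  # assumed import path
    srv = core.SequentialRandomVariable()
    srv.SetStream(7)                 # inherited from RandomVariableStream
    return srv.GetStream(), srv.IsAntithetic()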
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter< ns3::FdReader > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3RefCountBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3RefCountBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter< ns3::RefCountBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimulatorImpl_methods(root_module, cls):
## simulator-impl.h (module 'core'): ns3::SimulatorImpl::SimulatorImpl() [constructor]
cls.add_constructor([])
## simulator-impl.h (module 'core'): ns3::SimulatorImpl::SimulatorImpl(ns3::SimulatorImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SimulatorImpl const &', 'arg0')])
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Cancel(ns3::EventId const & ev) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): uint32_t ns3::SimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): uint32_t ns3::SimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): static ns3::TypeId ns3::SimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## simulator-impl.h (module 'core'): bool ns3::SimulatorImpl::IsExpired(ns3::EventId const & ev) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'ev')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): bool ns3::SimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Remove(ns3::EventId const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::Schedule(ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Synchronizer_methods(root_module, cls):
## synchronizer.h (module 'core'): ns3::Synchronizer::Synchronizer(ns3::Synchronizer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Synchronizer const &', 'arg0')])
## synchronizer.h (module 'core'): ns3::Synchronizer::Synchronizer() [constructor]
cls.add_constructor([])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::EventEnd() [member function]
cls.add_method('EventEnd',
'uint64_t',
[])
## synchronizer.h (module 'core'): void ns3::Synchronizer::EventStart() [member function]
cls.add_method('EventStart',
'void',
[])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::GetCurrentRealtime() [member function]
cls.add_method('GetCurrentRealtime',
'uint64_t',
[])
## synchronizer.h (module 'core'): int64_t ns3::Synchronizer::GetDrift(uint64_t ts) [member function]
cls.add_method('GetDrift',
'int64_t',
[param('uint64_t', 'ts')])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::GetOrigin() [member function]
cls.add_method('GetOrigin',
'uint64_t',
[])
## synchronizer.h (module 'core'): static ns3::TypeId ns3::Synchronizer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## synchronizer.h (module 'core'): bool ns3::Synchronizer::Realtime() [member function]
cls.add_method('Realtime',
'bool',
[])
## synchronizer.h (module 'core'): void ns3::Synchronizer::SetCondition(bool arg0) [member function]
cls.add_method('SetCondition',
'void',
[param('bool', 'arg0')])
## synchronizer.h (module 'core'): void ns3::Synchronizer::SetOrigin(uint64_t ts) [member function]
cls.add_method('SetOrigin',
'void',
[param('uint64_t', 'ts')])
## synchronizer.h (module 'core'): void ns3::Synchronizer::Signal() [member function]
cls.add_method('Signal',
'void',
[])
## synchronizer.h (module 'core'): bool ns3::Synchronizer::Synchronize(uint64_t tsCurrent, uint64_t tsDelay) [member function]
cls.add_method('Synchronize',
'bool',
[param('uint64_t', 'tsCurrent'), param('uint64_t', 'tsDelay')])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::DoEventEnd() [member function]
cls.add_method('DoEventEnd',
'uint64_t',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoEventStart() [member function]
cls.add_method('DoEventStart',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::DoGetCurrentRealtime() [member function]
cls.add_method('DoGetCurrentRealtime',
'uint64_t',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): int64_t ns3::Synchronizer::DoGetDrift(uint64_t ns) [member function]
cls.add_method('DoGetDrift',
'int64_t',
[param('uint64_t', 'ns')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): bool ns3::Synchronizer::DoRealtime() [member function]
cls.add_method('DoRealtime',
'bool',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoSetCondition(bool arg0) [member function]
cls.add_method('DoSetCondition',
'void',
[param('bool', 'arg0')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoSetOrigin(uint64_t ns) [member function]
cls.add_method('DoSetOrigin',
'void',
[param('uint64_t', 'ns')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoSignal() [member function]
cls.add_method('DoSignal',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): bool ns3::Synchronizer::DoSynchronize(uint64_t nsCurrent, uint64_t nsDelay) [member function]
cls.add_method('DoSynchronize',
'bool',
[param('uint64_t', 'nsCurrent'), param('uint64_t', 'nsDelay')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3SystemThread_methods(root_module, cls):
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [constructor]
cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## system-thread.h (module 'core'): static bool ns3::SystemThread::Equals(pthread_t id) [member function]
cls.add_method('Equals',
'bool',
[param('pthread_t', 'id')],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Join() [member function]
cls.add_method('Join',
'void',
[])
## system-thread.h (module 'core'): static pthread_t ns3::SystemThread::Self() [member function]
cls.add_method('Self',
'pthread_t',
[],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Start() [member function]
cls.add_method('Start',
'void',
[])
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
return
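# Note: the operator registrations above are what make ns3::Time behave as a
# near-native numeric type from Python. A minimal usage sketch, assuming the
# standard 'ns.core' module produced by these bindings (helpers such as
# Seconds()/MilliSeconds() are registered elsewhere in this module):
#
#   import ns.core
#   t = ns.core.Seconds(1.0) + ns.core.MilliSeconds(500)
#   assert t.GetMilliSeconds() == 1500
#   assert ns.core.Seconds(1.0) < t <= ns.core.Seconds(2.0)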
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3UniformRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
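# Note: the bound GetValue(min, max)/GetInteger(min, max) overloads registered
# above can be called directly from Python. Illustrative sketch only (assumes
# the generated module is importable as 'ns.core'):
#
#   import ns.core
#   rng = ns.core.UniformRandomVariable()
#   sample = rng.GetValue(0.0, 10.0)   # double draw between 0 and 10
#   die = rng.GetInteger(1, 6)         # uint32_t draw between 1 and 6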
def register_Ns3WallClockSynchronizer_methods(root_module, cls):
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::WallClockSynchronizer(ns3::WallClockSynchronizer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::WallClockSynchronizer const &', 'arg0')])
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::WallClockSynchronizer() [constructor]
cls.add_constructor([])
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::NS_PER_SEC [variable]
cls.add_static_attribute('NS_PER_SEC', 'uint64_t const', is_const=True)
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::US_PER_NS [variable]
cls.add_static_attribute('US_PER_NS', 'uint64_t const', is_const=True)
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::US_PER_SEC [variable]
cls.add_static_attribute('US_PER_SEC', 'uint64_t const', is_const=True)
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::DoEventEnd() [member function]
cls.add_method('DoEventEnd',
'uint64_t',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoEventStart() [member function]
cls.add_method('DoEventStart',
'void',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::DoGetCurrentRealtime() [member function]
cls.add_method('DoGetCurrentRealtime',
'uint64_t',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): int64_t ns3::WallClockSynchronizer::DoGetDrift(uint64_t ns) [member function]
cls.add_method('DoGetDrift',
'int64_t',
[param('uint64_t', 'ns')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::DoRealtime() [member function]
cls.add_method('DoRealtime',
'bool',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoSetCondition(bool cond) [member function]
cls.add_method('DoSetCondition',
'void',
[param('bool', 'cond')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoSetOrigin(uint64_t ns) [member function]
cls.add_method('DoSetOrigin',
'void',
[param('uint64_t', 'ns')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoSignal() [member function]
cls.add_method('DoSignal',
'void',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::DoSynchronize(uint64_t nsCurrent, uint64_t nsDelay) [member function]
cls.add_method('DoSynchronize',
'bool',
[param('uint64_t', 'nsCurrent'), param('uint64_t', 'nsDelay')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::DriftCorrect(uint64_t nsNow, uint64_t nsDelay) [member function]
cls.add_method('DriftCorrect',
'uint64_t',
[param('uint64_t', 'nsNow'), param('uint64_t', 'nsDelay')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::GetNormalizedRealtime() [member function]
cls.add_method('GetNormalizedRealtime',
'uint64_t',
[],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::GetRealtime() [member function]
cls.add_method('GetRealtime',
'uint64_t',
[],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::NsToTimeval(int64_t ns, timeval * tv) [member function]
cls.add_method('NsToTimeval',
'void',
[param('int64_t', 'ns'), param('timeval *', 'tv')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::SleepWait(uint64_t arg0) [member function]
cls.add_method('SleepWait',
'bool',
[param('uint64_t', 'arg0')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::SpinWait(uint64_t arg0) [member function]
cls.add_method('SpinWait',
'bool',
[param('uint64_t', 'arg0')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::TimevalAdd(timeval * tv1, timeval * tv2, timeval * result) [member function]
cls.add_method('TimevalAdd',
'void',
[param('timeval *', 'tv1'), param('timeval *', 'tv2'), param('timeval *', 'result')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::TimevalToNs(timeval * tv) [member function]
cls.add_method('TimevalToNs',
'uint64_t',
[param('timeval *', 'tv')],
visibility='protected')
return
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'n'), param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'n'), param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3BooleanChecker_methods(root_module, cls):
## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker() [constructor]
cls.add_constructor([])
## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker(ns3::BooleanChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
return
def register_Ns3BooleanValue_methods(root_module, cls):
cls.add_output_stream_operator()
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(ns3::BooleanValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue() [constructor]
cls.add_constructor([])
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(bool value) [constructor]
cls.add_constructor([param('bool', 'value')])
## boolean.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::BooleanValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## boolean.h (module 'core'): bool ns3::BooleanValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## boolean.h (module 'core'): bool ns3::BooleanValue::Get() const [member function]
cls.add_method('Get',
'bool',
[],
is_const=True)
## boolean.h (module 'core'): std::string ns3::BooleanValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## boolean.h (module 'core'): void ns3::BooleanValue::Set(bool value) [member function]
cls.add_method('Set',
'void',
[param('bool', 'value')])
return
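# Note: BooleanValue is the AttributeValue wrapper for plain bools. A minimal
# sketch of typical use from Python (the attribute name below is purely
# hypothetical, not taken from this module):
#
#   import ns.core
#   v = ns.core.BooleanValue(True)
#   assert v.Get()
#   # obj.SetAttribute("SomeBooleanAttribute", ns.core.BooleanValue(False))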
def register_Ns3CalendarScheduler_methods(root_module, cls):
## calendar-scheduler.h (module 'core'): ns3::CalendarScheduler::CalendarScheduler(ns3::CalendarScheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CalendarScheduler const &', 'arg0')])
## calendar-scheduler.h (module 'core'): ns3::CalendarScheduler::CalendarScheduler() [constructor]
cls.add_constructor([])
## calendar-scheduler.h (module 'core'): static ns3::TypeId ns3::CalendarScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## calendar-scheduler.h (module 'core'): void ns3::CalendarScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## calendar-scheduler.h (module 'core'): bool ns3::CalendarScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## calendar-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::CalendarScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## calendar-scheduler.h (module 'core'): void ns3::CalendarScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## calendar-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::CalendarScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function]
cls.add_method('GetConstant',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'constant')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'constant')])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3DefaultSimulatorImpl_methods(root_module, cls):
## default-simulator-impl.h (module 'core'): ns3::DefaultSimulatorImpl::DefaultSimulatorImpl(ns3::DefaultSimulatorImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DefaultSimulatorImpl const &', 'arg0')])
## default-simulator-impl.h (module 'core'): ns3::DefaultSimulatorImpl::DefaultSimulatorImpl() [constructor]
cls.add_constructor([])
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Cancel(ns3::EventId const & ev) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'ev')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_virtual=True)
## default-simulator-impl.h (module 'core'): uint32_t ns3::DefaultSimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::Time ns3::DefaultSimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::Time ns3::DefaultSimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): uint32_t ns3::DefaultSimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): static ns3::TypeId ns3::DefaultSimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## default-simulator-impl.h (module 'core'): bool ns3::DefaultSimulatorImpl::IsExpired(ns3::EventId const & ev) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'ev')],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): bool ns3::DefaultSimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::Time ns3::DefaultSimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Remove(ns3::EventId const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'ev')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::EventId ns3::DefaultSimulatorImpl::Schedule(ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::EventId ns3::DefaultSimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::EventId ns3::DefaultSimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
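# Note: DefaultSimulatorImpl is normally driven through the ns3::Simulator
# facade (registered elsewhere in this module) rather than called directly.
# Illustrative sketch only, assuming the usual 'ns.core' layout:
#
#   import ns.core
#   ns.core.Simulator.Stop(ns.core.Seconds(10.0))
#   ns.core.Simulator.Run()
#   ns.core.Simulator.Destroy()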
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function]
cls.add_method('SetValueArray',
'void',
[param('double *', 'values'), param('uint64_t', 'length')])
## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3DoubleValue_methods(root_module, cls):
## double.h (module 'core'): ns3::DoubleValue::DoubleValue() [constructor]
cls.add_constructor([])
## double.h (module 'core'): ns3::DoubleValue::DoubleValue(ns3::DoubleValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
## double.h (module 'core'): ns3::DoubleValue::DoubleValue(double const & value) [constructor]
cls.add_constructor([param('double const &', 'value')])
## double.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::DoubleValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## double.h (module 'core'): bool ns3::DoubleValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## double.h (module 'core'): double ns3::DoubleValue::Get() const [member function]
cls.add_method('Get',
'double',
[],
is_const=True)
## double.h (module 'core'): std::string ns3::DoubleValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## double.h (module 'core'): void ns3::DoubleValue::Set(double const & value) [member function]
cls.add_method('Set',
'void',
[param('double const &', 'value')])
return
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function]
cls.add_method('CDF',
'void',
[param('double', 'v'), param('double', 'c')])
## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double arg0, double arg1, double arg2, double arg3, double arg4) [member function]
cls.add_method('Interpolate',
'double',
[param('double', 'arg0'), param('double', 'arg1'), param('double', 'arg2'), param('double', 'arg3'), param('double', 'arg4')],
visibility='private', is_virtual=True)
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function]
cls.add_method('Validate',
'void',
[],
visibility='private', is_virtual=True)
return
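# Note: EmpiricalRandomVariable is configured by feeding (value, cumulative
# probability) points through the CDF() method registered above; GetValue()
# then draws according to that empirical CDF. Illustrative sketch only:
#
#   import ns.core
#   erv = ns.core.EmpiricalRandomVariable()
#   erv.CDF(0.0, 0.0)
#   erv.CDF(5.0, 0.5)
#   erv.CDF(10.0, 1.0)
#   x = erv.GetValue()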
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EnumChecker_methods(root_module, cls):
## enum.h (module 'core'): ns3::EnumChecker::EnumChecker(ns3::EnumChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
## enum.h (module 'core'): ns3::EnumChecker::EnumChecker() [constructor]
cls.add_constructor([])
## enum.h (module 'core'): void ns3::EnumChecker::Add(int v, std::string name) [member function]
cls.add_method('Add',
'void',
[param('int', 'v'), param('std::string', 'name')])
## enum.h (module 'core'): void ns3::EnumChecker::AddDefault(int v, std::string name) [member function]
cls.add_method('AddDefault',
'void',
[param('int', 'v'), param('std::string', 'name')])
## enum.h (module 'core'): bool ns3::EnumChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumChecker::Copy(ns3::AttributeValue const & src, ns3::AttributeValue & dst) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): std::string ns3::EnumChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): std::string ns3::EnumChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EnumValue_methods(root_module, cls):
## enum.h (module 'core'): ns3::EnumValue::EnumValue(ns3::EnumValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
## enum.h (module 'core'): ns3::EnumValue::EnumValue() [constructor]
cls.add_constructor([])
## enum.h (module 'core'): ns3::EnumValue::EnumValue(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## enum.h (module 'core'): int ns3::EnumValue::Get() const [member function]
cls.add_method('Get',
'int',
[],
is_const=True)
## enum.h (module 'core'): std::string ns3::EnumValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): void ns3::EnumValue::Set(int v) [member function]
cls.add_method('Set',
'void',
[param('int', 'v')])
return
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function]
cls.add_method('GetK',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'k'), param('double', 'lambda')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'k'), param('uint32_t', 'lambda')])
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3FdReader_methods(root_module, cls):
## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader(ns3::FdReader const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FdReader const &', 'arg0')])
## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader() [constructor]
cls.add_constructor([])
## unix-fd-reader.h (module 'core'): void ns3::FdReader::Start(int fd, ns3::Callback<void, unsigned char*, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> readCallback) [member function]
cls.add_method('Start',
'void',
[param('int', 'fd'), param('ns3::Callback< void, unsigned char *, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'readCallback')])
## unix-fd-reader.h (module 'core'): void ns3::FdReader::Stop() [member function]
cls.add_method('Stop',
'void',
[])
## unix-fd-reader.h (module 'core'): ns3::FdReader::Data ns3::FdReader::DoRead() [member function]
cls.add_method('DoRead',
'ns3::FdReader::Data',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3GammaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function]
cls.add_method('GetBeta',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha'), param('double', 'beta')])
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3HeapScheduler_methods(root_module, cls):
## heap-scheduler.h (module 'core'): ns3::HeapScheduler::HeapScheduler(ns3::HeapScheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::HeapScheduler const &', 'arg0')])
## heap-scheduler.h (module 'core'): ns3::HeapScheduler::HeapScheduler() [constructor]
cls.add_constructor([])
## heap-scheduler.h (module 'core'): static ns3::TypeId ns3::HeapScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## heap-scheduler.h (module 'core'): void ns3::HeapScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## heap-scheduler.h (module 'core'): bool ns3::HeapScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## heap-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::HeapScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## heap-scheduler.h (module 'core'): void ns3::HeapScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## heap-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::HeapScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
def register_Ns3IntegerValue_methods(root_module, cls):
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue() [constructor]
cls.add_constructor([])
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(ns3::IntegerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(int64_t const & value) [constructor]
cls.add_constructor([param('int64_t const &', 'value')])
## integer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::IntegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## integer.h (module 'core'): bool ns3::IntegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## integer.h (module 'core'): int64_t ns3::IntegerValue::Get() const [member function]
cls.add_method('Get',
'int64_t',
[],
is_const=True)
## integer.h (module 'core'): std::string ns3::IntegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## integer.h (module 'core'): void ns3::IntegerValue::Set(int64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('int64_t const &', 'value')])
return
def register_Ns3ListScheduler_methods(root_module, cls):
## list-scheduler.h (module 'core'): ns3::ListScheduler::ListScheduler(ns3::ListScheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ListScheduler const &', 'arg0')])
## list-scheduler.h (module 'core'): ns3::ListScheduler::ListScheduler() [constructor]
cls.add_constructor([])
## list-scheduler.h (module 'core'): static ns3::TypeId ns3::ListScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## list-scheduler.h (module 'core'): void ns3::ListScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## list-scheduler.h (module 'core'): bool ns3::ListScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## list-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::ListScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## list-scheduler.h (module 'core'): void ns3::ListScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## list-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::ListScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
cls.add_method('GetMu',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
cls.add_method('GetSigma',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mu'), param('double', 'sigma')])
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3MapScheduler_methods(root_module, cls):
## map-scheduler.h (module 'core'): ns3::MapScheduler::MapScheduler(ns3::MapScheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::MapScheduler const &', 'arg0')])
## map-scheduler.h (module 'core'): ns3::MapScheduler::MapScheduler() [constructor]
cls.add_constructor([])
## map-scheduler.h (module 'core'): static ns3::TypeId ns3::MapScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## map-scheduler.h (module 'core'): void ns3::MapScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## map-scheduler.h (module 'core'): bool ns3::MapScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## map-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::MapScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## map-scheduler.h (module 'core'): void ns3::MapScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## map-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::MapScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
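# Illustrative sketch only: HeapScheduler, ListScheduler and MapScheduler
# registered above all implement the same Scheduler interface
# (Insert/IsEmpty/PeekNext/Remove/RemoveNext). Direct construction mirrors the
# zero-argument constructors registered above; in practice a scheduler is
# usually installed through Simulator.SetScheduler with an ObjectFactory. The
# module name 'ns.core' is an assumption of this sketch.
def _example_scheduler_usage():
    import ns.core
    for scheduler_cls in (ns.core.HeapScheduler, ns.core.ListScheduler, ns.core.MapScheduler):
        scheduler = scheduler_cls()
        assert scheduler.IsEmpty()          # a freshly built scheduler holds no events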
def register_Ns3NormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
cls.add_method('GetVariance',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
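# Illustrative sketch only: the NormalRandomVariable bindings above expose
# GetValue(mean, variance, bound=INFINITE_VALUE), so omitting the bound draws
# from the unbounded Gaussian while an explicit bound keeps samples within
# mean +/- bound. The module name 'ns.core' is an assumption of this sketch.
def _example_normal_random_variable_usage():
    import ns.core
    normal = ns.core.NormalRandomVariable()
    unbounded = normal.GetValue(50.0, 4.0)        # bound defaults to INFINITE_VALUE
    clipped = normal.GetValue(50.0, 4.0, 2.0)     # samples kept within 50.0 +/- 2.0
    return unbounded, clipped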
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3ObjectPtrContainerAccessor_methods(root_module, cls):
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerAccessor::ObjectPtrContainerAccessor() [constructor]
cls.add_constructor([])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerAccessor::ObjectPtrContainerAccessor(ns3::ObjectPtrContainerAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectPtrContainerAccessor const &', 'arg0')])
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & value) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'value')],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectPtrContainerAccessor::DoGet(ns3::ObjectBase const * object, uint32_t i, uint32_t * index) const [member function]
cls.add_method('DoGet',
'ns3::Ptr< ns3::Object >',
[param('ns3::ObjectBase const *', 'object'), param('uint32_t', 'i'), param('uint32_t *', 'index')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::DoGetN(ns3::ObjectBase const * object, uint32_t * n) const [member function]
cls.add_method('DoGetN',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('uint32_t *', 'n')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3ObjectPtrContainerChecker_methods(root_module, cls):
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerChecker::ObjectPtrContainerChecker() [constructor]
cls.add_constructor([])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerChecker::ObjectPtrContainerChecker(ns3::ObjectPtrContainerChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectPtrContainerChecker const &', 'arg0')])
## object-ptr-container.h (module 'core'): ns3::TypeId ns3::ObjectPtrContainerChecker::GetItemTypeId() const [member function]
cls.add_method('GetItemTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3ObjectPtrContainerValue_methods(root_module, cls):
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue::ObjectPtrContainerValue(ns3::ObjectPtrContainerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectPtrContainerValue const &', 'arg0')])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue::ObjectPtrContainerValue() [constructor]
cls.add_constructor([])
## object-ptr-container.h (module 'core'): std::_Rb_tree_const_iterator<std::pair<const unsigned int, ns3::Ptr<ns3::Object> > > ns3::ObjectPtrContainerValue::Begin() const [member function]
cls.add_method('Begin',
'std::_Rb_tree_const_iterator< std::pair< unsigned int const, ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## object-ptr-container.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectPtrContainerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-ptr-container.h (module 'core'): std::_Rb_tree_const_iterator<std::pair<const unsigned int, ns3::Ptr<ns3::Object> > > ns3::ObjectPtrContainerValue::End() const [member function]
cls.add_method('End',
'std::_Rb_tree_const_iterator< std::pair< unsigned int const, ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## object-ptr-container.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectPtrContainerValue::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')],
is_const=True)
## object-ptr-container.h (module 'core'): uint32_t ns3::ObjectPtrContainerValue::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## object-ptr-container.h (module 'core'): std::string ns3::ObjectPtrContainerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
return
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double mean, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t mean, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3PointerChecker_methods(root_module, cls):
## pointer.h (module 'core'): ns3::PointerChecker::PointerChecker() [constructor]
cls.add_constructor([])
## pointer.h (module 'core'): ns3::PointerChecker::PointerChecker(ns3::PointerChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PointerChecker const &', 'arg0')])
## pointer.h (module 'core'): ns3::TypeId ns3::PointerChecker::GetPointeeTypeId() const [member function]
cls.add_method('GetPointeeTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3PointerValue_methods(root_module, cls):
## pointer.h (module 'core'): ns3::PointerValue::PointerValue(ns3::PointerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PointerValue const &', 'arg0')])
## pointer.h (module 'core'): ns3::PointerValue::PointerValue() [constructor]
cls.add_constructor([])
## pointer.h (module 'core'): ns3::PointerValue::PointerValue(ns3::Ptr<ns3::Object> object) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Object >', 'object')])
## pointer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::PointerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## pointer.h (module 'core'): bool ns3::PointerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## pointer.h (module 'core'): ns3::Ptr<ns3::Object> ns3::PointerValue::GetObject() const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## pointer.h (module 'core'): std::string ns3::PointerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## pointer.h (module 'core'): void ns3::PointerValue::SetObject(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('SetObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'object')])
return
def register_Ns3RandomVariableChecker_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::RandomVariableChecker::RandomVariableChecker() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::RandomVariableChecker::RandomVariableChecker(ns3::RandomVariableChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomVariableChecker const &', 'arg0')])
return
def register_Ns3RandomVariableValue_methods(root_module, cls):
## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue() [constructor]
cls.add_constructor([])
## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue(ns3::RandomVariableValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomVariableValue const &', 'arg0')])
## random-variable.h (module 'core'): ns3::RandomVariableValue::RandomVariableValue(ns3::RandomVariable const & value) [constructor]
cls.add_constructor([param('ns3::RandomVariable const &', 'value')])
## random-variable.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::RandomVariableValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## random-variable.h (module 'core'): bool ns3::RandomVariableValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## random-variable.h (module 'core'): ns3::RandomVariable ns3::RandomVariableValue::Get() const [member function]
cls.add_method('Get',
'ns3::RandomVariable',
[],
is_const=True)
## random-variable.h (module 'core'): std::string ns3::RandomVariableValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## random-variable.h (module 'core'): void ns3::RandomVariableValue::Set(ns3::RandomVariable const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::RandomVariable const &', 'value')])
return
def register_Ns3RealtimeSimulatorImpl_methods(root_module, cls):
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::RealtimeSimulatorImpl(ns3::RealtimeSimulatorImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RealtimeSimulatorImpl const &', 'arg0')])
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::RealtimeSimulatorImpl() [constructor]
cls.add_constructor([])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Cancel(ns3::EventId const & ev) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'ev')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): uint32_t ns3::RealtimeSimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::GetHardLimit() const [member function]
cls.add_method('GetHardLimit',
'ns3::Time',
[],
is_const=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::SynchronizationMode ns3::RealtimeSimulatorImpl::GetSynchronizationMode() const [member function]
cls.add_method('GetSynchronizationMode',
'ns3::RealtimeSimulatorImpl::SynchronizationMode',
[],
is_const=True)
## realtime-simulator-impl.h (module 'core'): uint32_t ns3::RealtimeSimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): static ns3::TypeId ns3::RealtimeSimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## realtime-simulator-impl.h (module 'core'): bool ns3::RealtimeSimulatorImpl::IsExpired(ns3::EventId const & ev) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'ev')],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): bool ns3::RealtimeSimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::RealtimeNow() const [member function]
cls.add_method('RealtimeNow',
'ns3::Time',
[],
is_const=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Remove(ns3::EventId const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'ev')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::EventId ns3::RealtimeSimulatorImpl::Schedule(ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::EventId ns3::RealtimeSimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::EventId ns3::RealtimeSimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtime(ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtime',
'void',
[param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtimeNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtimeNow',
'void',
[param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtimeNowWithContext(uint32_t context, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtimeNowWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtimeWithContext(uint32_t context, ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtimeWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & time, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'time'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::SetHardLimit(ns3::Time limit) [member function]
cls.add_method('SetHardLimit',
'void',
[param('ns3::Time', 'limit')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::SetSynchronizationMode(ns3::RealtimeSimulatorImpl::SynchronizationMode mode) [member function]
cls.add_method('SetSynchronizationMode',
'void',
[param('ns3::RealtimeSimulatorImpl::SynchronizationMode', 'mode')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
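# Illustrative sketch only: RealtimeSimulatorImpl is normally selected globally
# rather than constructed by hand. The GlobalValue/StringValue idiom below is
# the usual ns-3 pattern for switching simulator implementations; the module
# name 'ns.core' is an assumption of this sketch.
def _example_select_realtime_simulator():
    import ns.core
    ns.core.GlobalValue.Bind("SimulatorImplementationType",
                             ns.core.StringValue("ns3::RealtimeSimulatorImpl"))
    # After binding, Simulator.Run() paces event execution against wall-clock time.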
def register_Ns3RefCountBase_methods(root_module, cls):
## ref-count-base.h (module 'core'): ns3::RefCountBase::RefCountBase() [constructor]
cls.add_constructor([])
## ref-count-base.h (module 'core'): ns3::RefCountBase::RefCountBase(ns3::RefCountBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RefCountBase const &', 'arg0')])
return
def register_Ns3StringChecker_methods(root_module, cls):
## string.h (module 'core'): ns3::StringChecker::StringChecker() [constructor]
cls.add_constructor([])
## string.h (module 'core'): ns3::StringChecker::StringChecker(ns3::StringChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::StringChecker const &', 'arg0')])
return
def register_Ns3StringValue_methods(root_module, cls):
## string.h (module 'core'): ns3::StringValue::StringValue() [constructor]
cls.add_constructor([])
## string.h (module 'core'): ns3::StringValue::StringValue(ns3::StringValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::StringValue const &', 'arg0')])
## string.h (module 'core'): ns3::StringValue::StringValue(std::string const & value) [constructor]
cls.add_constructor([param('std::string const &', 'value')])
## string.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::StringValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## string.h (module 'core'): bool ns3::StringValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## string.h (module 'core'): std::string ns3::StringValue::Get() const [member function]
cls.add_method('Get',
'std::string',
[],
is_const=True)
## string.h (module 'core'): std::string ns3::StringValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## string.h (module 'core'): void ns3::StringValue::Set(std::string const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string const &', 'value')])
return
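# Illustrative sketch only: StringValue is the catch-all attribute value type;
# the Get/Set methods registered above round-trip a plain std::string. The
# module name 'ns.core' is an assumption of this sketch.
def _example_string_value_usage():
    import ns.core
    value = ns.core.StringValue("ns3::UdpSocketFactory")
    assert value.Get() == "ns3::UdpSocketFactory"
    value.Set("ns3::TcpSocketFactory")
    return value.Get()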
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
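# Illustrative sketch only: TimeValue wraps ns3::Time for the attribute system.
# MilliSeconds() is one of the time helpers registered in register_functions
# below; the module name 'ns.core' is an assumption of this sketch.
def _example_time_value_usage():
    import ns.core
    interval = ns.core.TimeValue(ns.core.MilliSeconds(500))
    return interval.Get().GetMilliSeconds()   # -> 500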
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3UintegerValue_methods(root_module, cls):
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue() [constructor]
cls.add_constructor([])
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(ns3::UintegerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(uint64_t const & value) [constructor]
cls.add_constructor([param('uint64_t const &', 'value')])
## uinteger.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::UintegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## uinteger.h (module 'core'): bool ns3::UintegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## uinteger.h (module 'core'): uint64_t ns3::UintegerValue::Get() const [member function]
cls.add_method('Get',
'uint64_t',
[],
is_const=True)
## uinteger.h (module 'core'): std::string ns3::UintegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## uinteger.h (module 'core'): void ns3::UintegerValue::Set(uint64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('uint64_t const &', 'value')])
return
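# Illustrative sketch only: UintegerValue carries a uint64_t for the attribute
# system; a common pattern is handing one to Config.SetDefault. The module name
# 'ns.core' and the attribute path in the comment below are assumptions of this
# sketch.
def _example_uinteger_value_usage():
    import ns.core
    packet_size = ns.core.UintegerValue(1024)
    assert packet_size.Get() == 1024
    # Typical pattern (path shown for illustration only):
    # ns.core.Config.SetDefault("ns3::OnOffApplication::PacketSize", packet_size)
    return packet_size.Get()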
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
def register_Ns3ConfigMatchContainer_methods(root_module, cls):
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer(ns3::Config::MatchContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Config::MatchContainer const &', 'arg0')])
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer() [constructor]
cls.add_constructor([])
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer(std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > const & objects, std::vector<std::string, std::allocator<std::string> > const & contexts, std::string path) [constructor]
cls.add_constructor([param('std::vector< ns3::Ptr< ns3::Object > > const &', 'objects'), param('std::vector< std::string > const &', 'contexts'), param('std::string', 'path')])
## config.h (module 'core'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Object>*,std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > > ns3::Config::MatchContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Object > const, std::vector< ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## config.h (module 'core'): void ns3::Config::MatchContainer::Connect(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('Connect',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::ConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('ConnectWithoutContext',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::Disconnect(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('Disconnect',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::DisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('DisconnectWithoutContext',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Object>*,std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > > ns3::Config::MatchContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Object > const, std::vector< ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## config.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Config::MatchContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')],
is_const=True)
## config.h (module 'core'): std::string ns3::Config::MatchContainer::GetMatchedPath(uint32_t i) const [member function]
cls.add_method('GetMatchedPath',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## config.h (module 'core'): uint32_t ns3::Config::MatchContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## config.h (module 'core'): std::string ns3::Config::MatchContainer::GetPath() const [member function]
cls.add_method('GetPath',
'std::string',
[],
is_const=True)
## config.h (module 'core'): void ns3::Config::MatchContainer::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
return
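# Illustrative sketch only: MatchContainer is the type returned by
# Config.LookupMatches, and the GetN/Get/GetMatchedPath methods registered
# above let callers walk the matched objects. The module name 'ns.core', the
# availability of Config.LookupMatches from Python, and the config path used
# below are all assumptions of this sketch.
def _example_match_container_usage():
    import ns.core
    matches = ns.core.Config.LookupMatches("/NodeList/*/DeviceList/*")
    return [matches.GetMatchedPath(i) for i in range(matches.GetN())]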
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
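# Illustrative sketch only: the Murmur3/Fnv1a classes above are the selectable
# hash back-ends, while the Hash32/Hash64 free functions registered in
# register_functions below give quick access to the default implementation.
# The module name 'ns.core' is an assumption of this sketch.
def _example_hash_usage():
    import ns.core
    h32 = ns.core.Hash32("Hello, ns-3")
    h64 = ns.core.Hash64("Hello, ns-3")
    return h32, h64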
def register_functions(root_module):
module = root_module
## nstime.h (module 'core'): ns3::Time ns3::Abs(ns3::Time const & time) [free function]
module.add_function('Abs',
'ns3::Time',
[param('ns3::Time const &', 'time')])
## int64x64.h (module 'core'): ns3::int64x64_t ns3::Abs(ns3::int64x64_t const & value) [free function]
module.add_function('Abs',
'ns3::int64x64_t',
[param('ns3::int64x64_t const &', 'value')])
## breakpoint.h (module 'core'): extern void ns3::BreakpointFallback() [free function]
module.add_function('BreakpointFallback',
'void',
[])
## vector.h (module 'core'): extern double ns3::CalculateDistance(ns3::Vector2D const & a, ns3::Vector2D const & b) [free function]
module.add_function('CalculateDistance',
'double',
[param('ns3::Vector2D const &', 'a'), param('ns3::Vector2D const &', 'b')])
## vector.h (module 'core'): extern double ns3::CalculateDistance(ns3::Vector3D const & a, ns3::Vector3D const & b) [free function]
module.add_function('CalculateDistance',
'double',
[param('ns3::Vector3D const &', 'a'), param('ns3::Vector3D const &', 'b')])
## ptr.h (module 'core'): extern ns3::Ptr<ns3::ObjectPtrContainerValue> ns3::Create() [free function]
module.add_function('Create',
'ns3::Ptr< ns3::ObjectPtrContainerValue >',
[],
template_parameters=['ns3::ObjectPtrContainerValue'])
## ptr.h (module 'core'): extern ns3::Ptr<ns3::PointerValue> ns3::Create() [free function]
module.add_function('Create',
'ns3::Ptr< ns3::PointerValue >',
[],
template_parameters=['ns3::PointerValue'])
## nstime.h (module 'core'): ns3::Time ns3::FemtoSeconds(ns3::int64x64_t fs) [free function]
module.add_function('FemtoSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'fs')])
## nstime.h (module 'core'): ns3::Time ns3::FemtoSeconds(uint64_t fs) [free function]
module.add_function('FemtoSeconds',
'ns3::Time',
[param('uint64_t', 'fs')])
## hash.h (module 'core'): uint32_t ns3::Hash32(std::string const s) [free function]
module.add_function('Hash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint32_t ns3::Hash32(char const * buffer, size_t const size) [free function]
module.add_function('Hash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hash64(std::string const s) [free function]
module.add_function('Hash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hash64(char const * buffer, size_t const size) [free function]
module.add_function('Hash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## log.h (module 'core'): extern void ns3::LogComponentDisable(char const * name, ns3::LogLevel level) [free function]
module.add_function('LogComponentDisable',
'void',
[param('char const *', 'name'), param('ns3::LogLevel', 'level')])
## log.h (module 'core'): extern void ns3::LogComponentDisableAll(ns3::LogLevel level) [free function]
module.add_function('LogComponentDisableAll',
'void',
[param('ns3::LogLevel', 'level')])
## log.h (module 'core'): extern void ns3::LogComponentEnable(char const * name, ns3::LogLevel level) [free function]
module.add_function('LogComponentEnable',
'void',
[param('char const *', 'name'), param('ns3::LogLevel', 'level')])
## log.h (module 'core'): extern void ns3::LogComponentEnableAll(ns3::LogLevel level) [free function]
module.add_function('LogComponentEnableAll',
'void',
[param('ns3::LogLevel', 'level')])
## log.h (module 'core'): extern void ns3::LogComponentPrintList() [free function]
module.add_function('LogComponentPrintList',
'void',
[])
## log.h (module 'core'): extern ns3::LogNodePrinter ns3::LogGetNodePrinter() [free function]
module.add_function('LogGetNodePrinter',
'ns3::LogNodePrinter',
[])
## log.h (module 'core'): extern ns3::LogTimePrinter ns3::LogGetTimePrinter() [free function]
module.add_function('LogGetTimePrinter',
'ns3::LogTimePrinter',
[])
## log.h (module 'core'): extern void ns3::LogSetNodePrinter(ns3::LogNodePrinter arg0) [free function]
module.add_function('LogSetNodePrinter',
'void',
[param('ns3::LogNodePrinter', 'arg0')])
## log.h (module 'core'): extern void ns3::LogSetTimePrinter(ns3::LogTimePrinter arg0) [free function]
module.add_function('LogSetTimePrinter',
'void',
[param('ns3::LogTimePrinter', 'arg0')])
## boolean.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeBooleanChecker() [free function]
module.add_function('MakeBooleanChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## callback.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeCallbackChecker() [free function]
module.add_function('MakeCallbackChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## enum.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeEnumChecker(int v1, std::string n1, int v2=0, std::string n2="", int v3=0, std::string n3="", int v4=0, std::string n4="", int v5=0, std::string n5="", int v6=0, std::string n6="", int v7=0, std::string n7="", int v8=0, std::string n8="", int v9=0, std::string n9="", int v10=0, std::string n10="", int v11=0, std::string n11="", int v12=0, std::string n12="", int v13=0, std::string n13="", int v14=0, std::string n14="", int v15=0, std::string n15="", int v16=0, std::string n16="", int v17=0, std::string n17="", int v18=0, std::string n18="", int v19=0, std::string n19="", int v20=0, std::string n20="", int v21=0, std::string n21="", int v22=0, std::string n22="") [free function]
module.add_function('MakeEnumChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('int', 'v1'), param('std::string', 'n1'), param('int', 'v2', default_value='0'), param('std::string', 'n2', default_value='""'), param('int', 'v3', default_value='0'), param('std::string', 'n3', default_value='""'), param('int', 'v4', default_value='0'), param('std::string', 'n4', default_value='""'), param('int', 'v5', default_value='0'), param('std::string', 'n5', default_value='""'), param('int', 'v6', default_value='0'), param('std::string', 'n6', default_value='""'), param('int', 'v7', default_value='0'), param('std::string', 'n7', default_value='""'), param('int', 'v8', default_value='0'), param('std::string', 'n8', default_value='""'), param('int', 'v9', default_value='0'), param('std::string', 'n9', default_value='""'), param('int', 'v10', default_value='0'), param('std::string', 'n10', default_value='""'), param('int', 'v11', default_value='0'), param('std::string', 'n11', default_value='""'), param('int', 'v12', default_value='0'), param('std::string', 'n12', default_value='""'), param('int', 'v13', default_value='0'), param('std::string', 'n13', default_value='""'), param('int', 'v14', default_value='0'), param('std::string', 'n14', default_value='""'), param('int', 'v15', default_value='0'), param('std::string', 'n15', default_value='""'), param('int', 'v16', default_value='0'), param('std::string', 'n16', default_value='""'), param('int', 'v17', default_value='0'), param('std::string', 'n17', default_value='""'), param('int', 'v18', default_value='0'), param('std::string', 'n18', default_value='""'), param('int', 'v19', default_value='0'), param('std::string', 'n19', default_value='""'), param('int', 'v20', default_value='0'), param('std::string', 'n20', default_value='""'), param('int', 'v21', default_value='0'), param('std::string', 'n21', default_value='""'), param('int', 'v22', default_value='0'), param('std::string', 'n22', default_value='""')])
## make-event.h (module 'core'): extern ns3::EventImpl * ns3::MakeEvent(void (*)( ) * f) [free function]
module.add_function('MakeEvent',
'ns3::EventImpl *',
[param('void ( * ) ( ) *', 'f')])
## object-factory.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeObjectFactoryChecker() [free function]
module.add_function('MakeObjectFactoryChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## random-variable.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeRandomVariableChecker() [free function]
module.add_function('MakeRandomVariableChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## string.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeStringChecker() [free function]
module.add_function('MakeStringChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeChecker const> ns3::MakeTimeChecker() [free function]
module.add_function('MakeTimeChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeChecker const> ns3::MakeTimeChecker(ns3::Time const min) [free function]
module.add_function('MakeTimeChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('ns3::Time const', 'min')])
## nstime.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeTimeChecker(ns3::Time const min, ns3::Time const max) [free function]
module.add_function('MakeTimeChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('ns3::Time const', 'min'), param('ns3::Time const', 'max')])
## type-id.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeTypeIdChecker() [free function]
module.add_function('MakeTypeIdChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeVector2DChecker() [free function]
module.add_function('MakeVector2DChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeVector3DChecker() [free function]
module.add_function('MakeVector3DChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeVectorChecker() [free function]
module.add_function('MakeVectorChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## nstime.h (module 'core'): ns3::Time ns3::Max(ns3::Time const & ta, ns3::Time const & tb) [free function]
module.add_function('Max',
'ns3::Time',
[param('ns3::Time const &', 'ta'), param('ns3::Time const &', 'tb')])
## int64x64.h (module 'core'): ns3::int64x64_t ns3::Max(ns3::int64x64_t const & a, ns3::int64x64_t const & b) [free function]
module.add_function('Max',
'ns3::int64x64_t',
[param('ns3::int64x64_t const &', 'a'), param('ns3::int64x64_t const &', 'b')])
## nstime.h (module 'core'): ns3::Time ns3::MicroSeconds(ns3::int64x64_t us) [free function]
module.add_function('MicroSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'us')])
## nstime.h (module 'core'): ns3::Time ns3::MicroSeconds(uint64_t us) [free function]
module.add_function('MicroSeconds',
'ns3::Time',
[param('uint64_t', 'us')])
## nstime.h (module 'core'): ns3::Time ns3::MilliSeconds(ns3::int64x64_t ms) [free function]
module.add_function('MilliSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'ms')])
## nstime.h (module 'core'): ns3::Time ns3::MilliSeconds(uint64_t ms) [free function]
module.add_function('MilliSeconds',
'ns3::Time',
[param('uint64_t', 'ms')])
## nstime.h (module 'core'): ns3::Time ns3::Min(ns3::Time const & ta, ns3::Time const & tb) [free function]
module.add_function('Min',
'ns3::Time',
[param('ns3::Time const &', 'ta'), param('ns3::Time const &', 'tb')])
## int64x64.h (module 'core'): ns3::int64x64_t ns3::Min(ns3::int64x64_t const & a, ns3::int64x64_t const & b) [free function]
module.add_function('Min',
'ns3::int64x64_t',
[param('ns3::int64x64_t const &', 'a'), param('ns3::int64x64_t const &', 'b')])
## nstime.h (module 'core'): ns3::Time ns3::NanoSeconds(ns3::int64x64_t ns) [free function]
module.add_function('NanoSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'ns')])
## nstime.h (module 'core'): ns3::Time ns3::NanoSeconds(uint64_t ns) [free function]
module.add_function('NanoSeconds',
'ns3::Time',
[param('uint64_t', 'ns')])
## simulator.h (module 'core'): extern ns3::Time ns3::Now() [free function]
module.add_function('Now',
'ns3::Time',
[])
## nstime.h (module 'core'): ns3::Time ns3::PicoSeconds(ns3::int64x64_t ps) [free function]
module.add_function('PicoSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'ps')])
## nstime.h (module 'core'): ns3::Time ns3::PicoSeconds(uint64_t ps) [free function]
module.add_function('PicoSeconds',
'ns3::Time',
[param('uint64_t', 'ps')])
## nstime.h (module 'core'): ns3::Time ns3::Seconds(ns3::int64x64_t seconds) [free function]
module.add_function('Seconds',
'ns3::Time',
[param('ns3::int64x64_t', 'seconds')])
## nstime.h (module 'core'): ns3::Time ns3::Seconds(double seconds) [free function]
module.add_function('Seconds',
'ns3::Time',
[param('double', 'seconds')])
## test.h (module 'core'): extern bool ns3::TestDoubleIsEqual(double const a, double const b, double const epsilon=std::numeric_limits<double>::epsilon()) [free function]
module.add_function('TestDoubleIsEqual',
'bool',
[param('double const', 'a'), param('double const', 'b'), param('double const', 'epsilon', default_value='std::numeric_limits<double>::epsilon()')])
## nstime.h (module 'core'): ns3::Time ns3::TimeStep(uint64_t ts) [free function]
module.add_function('TimeStep',
'ns3::Time',
[param('uint64_t', 'ts')])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['double'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['float'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['long'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['int'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['short'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['signed char'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned long long'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned int'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned short'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned char'])
register_functions_ns3_CommandLineHelper(module.get_submodule('CommandLineHelper'), root_module)
register_functions_ns3_Config(module.get_submodule('Config'), root_module)
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
register_functions_ns3_SystemPath(module.get_submodule('SystemPath'), root_module)
register_functions_ns3_internal(module.get_submodule('internal'), root_module)
return
def register_functions_ns3_CommandLineHelper(module, root_module):
## command-line.h (module 'core'): extern bool ns3::CommandLineHelper::UserItemParse(std::string const value, bool & val) [free function]
module.add_function('UserItemParse',
'bool',
[param('std::string const', 'value'), param('bool &', 'val')],
template_parameters=['bool'])
return
def register_functions_ns3_Config(module, root_module):
## config.h (module 'core'): extern void ns3::Config::Connect(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('Connect',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): extern void ns3::Config::ConnectWithoutContext(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('ConnectWithoutContext',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): extern void ns3::Config::Disconnect(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('Disconnect',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): extern void ns3::Config::DisconnectWithoutContext(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('DisconnectWithoutContext',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): extern ns3::Ptr<ns3::Object> ns3::Config::GetRootNamespaceObject(uint32_t i) [free function]
module.add_function('GetRootNamespaceObject',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')])
## config.h (module 'core'): extern uint32_t ns3::Config::GetRootNamespaceObjectN() [free function]
module.add_function('GetRootNamespaceObjectN',
'uint32_t',
[])
## config.h (module 'core'): extern ns3::Config::MatchContainer ns3::Config::LookupMatches(std::string path) [free function]
module.add_function('LookupMatches',
'ns3::Config::MatchContainer',
[param('std::string', 'path')])
## config.h (module 'core'): extern void ns3::Config::RegisterRootNamespaceObject(ns3::Ptr<ns3::Object> obj) [free function]
module.add_function('RegisterRootNamespaceObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'obj')])
## config.h (module 'core'): extern void ns3::Config::Reset() [free function]
module.add_function('Reset',
'void',
[])
## config.h (module 'core'): extern void ns3::Config::Set(std::string path, ns3::AttributeValue const & value) [free function]
module.add_function('Set',
'void',
[param('std::string', 'path'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): extern void ns3::Config::SetDefault(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetDefault',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): extern bool ns3::Config::SetDefaultFailSafe(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetDefaultFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): extern void ns3::Config::SetGlobal(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetGlobal',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): extern bool ns3::Config::SetGlobalFailSafe(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetGlobalFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): extern void ns3::Config::UnregisterRootNamespaceObject(ns3::Ptr<ns3::Object> obj) [free function]
module.add_function('UnregisterRootNamespaceObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'obj')])
return
def register_functions_ns3_FatalImpl(module, root_module):
## fatal-impl.h (module 'core'): extern void ns3::FatalImpl::FlushStreams() [free function]
module.add_function('FlushStreams',
'void',
[])
## fatal-impl.h (module 'core'): extern void ns3::FatalImpl::RegisterStream(std::ostream * stream) [free function]
module.add_function('RegisterStream',
'void',
[param('std::ostream *', 'stream')])
## fatal-impl.h (module 'core'): extern void ns3::FatalImpl::UnregisterStream(std::ostream * stream) [free function]
module.add_function('UnregisterStream',
'void',
[param('std::ostream *', 'stream')])
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_SystemPath(module, root_module):
## system-path.h (module 'core'): extern std::string ns3::SystemPath::Append(std::string left, std::string right) [free function]
module.add_function('Append',
'std::string',
[param('std::string', 'left'), param('std::string', 'right')])
## system-path.h (module 'core'): extern std::string ns3::SystemPath::FindSelfDirectory() [free function]
module.add_function('FindSelfDirectory',
'std::string',
[])
## system-path.h (module 'core'): extern std::string ns3::SystemPath::Join(std::_List_const_iterator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > begin, std::_List_const_iterator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > end) [free function]
module.add_function('Join',
'std::string',
[param('std::_List_const_iterator< std::basic_string< char, std::char_traits< char >, std::allocator< char > > >', 'begin'), param('std::_List_const_iterator< std::basic_string< char, std::char_traits< char >, std::allocator< char > > >', 'end')])
## system-path.h (module 'core'): extern void ns3::SystemPath::MakeDirectories(std::string path) [free function]
module.add_function('MakeDirectories',
'void',
[param('std::string', 'path')])
## system-path.h (module 'core'): extern std::string ns3::SystemPath::MakeTemporaryDirectoryName() [free function]
module.add_function('MakeTemporaryDirectoryName',
'std::string',
[])
## system-path.h (module 'core'): extern std::list<std::string, std::allocator<std::string> > ns3::SystemPath::ReadFiles(std::string path) [free function]
module.add_function('ReadFiles',
'std::list< std::string >',
[param('std::string', 'path')])
## system-path.h (module 'core'): extern std::list<std::string, std::allocator<std::string> > ns3::SystemPath::Split(std::string path) [free function]
module.add_function('Split',
'std::list< std::string >',
[param('std::string', 'path')])
return
def register_functions_ns3_internal(module, root_module):
## double.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::internal::MakeDoubleChecker(double min, double max, std::string name) [free function]
module.add_function('MakeDoubleChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('double', 'min'), param('double', 'max'), param('std::string', 'name')])
## integer.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::internal::MakeIntegerChecker(int64_t min, int64_t max, std::string name) [free function]
module.add_function('MakeIntegerChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('int64_t', 'min'), param('int64_t', 'max'), param('std::string', 'name')])
## uinteger.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::internal::MakeUintegerChecker(uint64_t min, uint64_t max, std::string name) [free function]
module.add_function('MakeUintegerChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('uint64_t', 'min'), param('uint64_t', 'max'), param('std::string', 'name')])
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
|
dankolbman/NumericalAnalysis
|
refs/heads/master
|
Homeworks/HW1/Problem5.py
|
1
|
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
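# The iteration functions below appear to be fixed-point rearrangements of
# f(x) = 2x^3 - 6x - 1 = 0 (g4 looks like the Newton form x - f(x)/f'(x));
# fpi() iterates x_{n+1} = g(x_n) on whichever g is passed in.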
def g1(x):
#return x + 0.1*(1/(2*x**2-6))
return (1/(2*x**2-6))
def g2(x):
return 3/x+1/(2*x**2)
def g3(x):
return (3*x+1/2)**(1/3.0)
def g4(x):
return x - 1/(6*x**2-6)*(2*x**3-6*x-1)
def fpi(x, thresh, max_step, f):
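    # Fixed-point iteration: repeatedly apply f starting from x, recording each
    # step index and |x_new - x|, and stop once the change drops below thresh
    # or max_step iterations are reached. Returns (x, steps, differences).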
itr = []
diffs = []
#print("x_new = " + str(x_new) + "\tx_old = " \
# + str(x) + "\tdiff = " + str(d))
for i in range(max_step):
itr.append(i)
x_new = f(x)
d = x_new - x
diffs.append(abs(d))
x = x_new
if(abs(d) < thresh):
return x, itr, diffs
return x, itr, diffs
print("g1(x):")
x, itr, diffs = fpi(-5, 1e-10, 100, g1)
print(x)
plt.plot(itr, diffs)
plt.gca().set_yscale('log')
plt.grid()
plt.title(r'FPI Convergence for $g_1(x)$', fontsize=28)
plt.ylabel('$log(|f(x_n)|)$', fontsize=24)
plt.xlabel('step, n', fontsize=24)
plt.savefig('Problem5a.png')
#plt.show()
print("g2(x):")
plt.close()
x, itr, diffs = fpi(-1, 1e-10, 100, g2)
print(x)
plt.plot(itr, diffs)
plt.gca().set_yscale('log')
plt.grid()
plt.title(r'FPI Convergence for $g_2(x)$', fontsize=28)
plt.ylabel('$log(|f(x_n)|)$', fontsize=24)
plt.xlabel('step, n', fontsize=24)
plt.savefig('Problem5b.png')
#plt.show()
print("g3(x):")
plt.close()
x, itr, diffs = fpi(3, 1e-10, 100, g3)
print(x)
x, itr, diffs = fpi(-0.1, 1e-10, 100, g3)
print(x)
plt.plot(itr, diffs)
plt.gca().set_yscale('log')
plt.grid()
plt.title(r'FPI Convergence for $g_3(x)$',fontsize=28)
plt.ylabel('$log(|f(x_n)|)$',fontsize=24)
plt.xlabel('step, n',fontsize=24)
plt.savefig('Problem5c.png')
#plt.show()
print("g4(x):")
plt.close()
x, itr, diffs = fpi(3, 1e-10, 100, g4)
print(x)
plt.plot(itr, diffs)
plt.gca().set_yscale('log')
plt.grid()
plt.title(r'FPI Convergence for $g_4(x)$ on $x=1.81$',fontsize=28)
plt.ylabel('$log(|f(x_n)|)$',fontsize=24)
plt.xlabel('step, n',fontsize=24)
plt.savefig('Problem5d.png')
plt.show()
plt.close()
x, itr, diffs = fpi(-0.1, 1e-10, 100, g4)
print(x)
plt.plot(itr, diffs)
plt.gca().set_yscale('log')
plt.grid()
plt.title(r'FPI Convergence for $g_4(x)$ on $x=-0.108$',fontsize=28)
plt.ylabel('$log(|f(x_n)|)$',fontsize=24)
plt.xlabel('step, n',fontsize=24)
plt.savefig('Problem5e.png')
plt.show()
plt.close()
x, itr, diffs = fpi(-1.1, 1e-10, 100, g4)
print(x)
plt.plot(itr, diffs)
plt.gca().set_yscale('log')
plt.grid()
plt.title(r'FPI Convergence for $g_4(x)$ on $x=-1.64$',fontsize=28)
plt.ylabel('$log(|f(x_n)|)$',fontsize=24)
plt.xlabel('step, n',fontsize=24)
plt.savefig('Problem5f.png')
plt.show()
|
ynikulin/tapp
|
refs/heads/master
|
tapp/fixtures/app/settings/__init__.py
|
1
|
# Define global APP settings
#
# local APP settings
try:
from .local import *
except ImportError, e:
pass
|
valkjsaaa/sl4a
|
refs/heads/master
|
python/src/Lib/test/pystone.py
|
189
|
#! /usr/bin/env python
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000
from time import clock
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
IntComp = 0, StringComp = 0):
self.PtrComp = PtrComp
self.Discr = Discr
self.EnumComp = EnumComp
self.IntComp = IntComp
self.StringComp = StringComp
def copy(self):
return Record(self.PtrComp, self.Discr, self.EnumComp,
self.IntComp, self.StringComp)
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
benchtime, stones = pystones(loops)
print "Pystone(%s) time for %d passes = %g" % \
(__version__, loops, benchtime)
print "This machine benchmarks at %g pystones/second" % stones
def pystones(loops=LOOPS):
return Proc0(loops)
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = map(lambda x: x[:], [Array1Glob]*51)
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
global IntGlob
global BoolGlob
global Char1Glob
global Char2Glob
global Array1Glob
global Array2Glob
global PtrGlb
global PtrGlbNext
starttime = clock()
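    # Time an empty loop first; this overhead is subtracted from the measured
    # benchmark time below.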
for i in range(loops):
pass
nulltime = clock() - starttime
PtrGlbNext = Record()
PtrGlb = Record()
PtrGlb.PtrComp = PtrGlbNext
PtrGlb.Discr = Ident1
PtrGlb.EnumComp = Ident3
PtrGlb.IntComp = 40
PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
Array2Glob[8][7] = 10
starttime = clock()
for i in range(loops):
Proc5()
Proc4()
IntLoc1 = 2
IntLoc2 = 3
String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
EnumLoc = Ident2
BoolGlob = not Func2(String1Loc, String2Loc)
while IntLoc1 < IntLoc2:
IntLoc3 = 5 * IntLoc1 - IntLoc2
IntLoc3 = Proc7(IntLoc1, IntLoc2)
IntLoc1 = IntLoc1 + 1
Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
PtrGlb = Proc1(PtrGlb)
CharIndex = 'A'
while CharIndex <= Char2Glob:
if EnumLoc == Func1(CharIndex, 'C'):
EnumLoc = Proc6(Ident1)
CharIndex = chr(ord(CharIndex)+1)
IntLoc3 = IntLoc2 * IntLoc1
IntLoc2 = IntLoc3 / IntLoc1
IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
IntLoc1 = Proc2(IntLoc1)
benchtime = clock() - starttime - nulltime
if benchtime == 0.0:
loopsPerBenchtime = 0.0
else:
loopsPerBenchtime = (loops / benchtime)
return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
PtrParIn.IntComp = 5
NextRecord.IntComp = PtrParIn.IntComp
NextRecord.PtrComp = PtrParIn.PtrComp
NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
if NextRecord.Discr == Ident1:
NextRecord.IntComp = 6
NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
NextRecord.PtrComp = PtrGlb.PtrComp
NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
else:
PtrParIn = NextRecord.copy()
NextRecord.PtrComp = None
return PtrParIn
def Proc2(IntParIO):
IntLoc = IntParIO + 10
while 1:
if Char1Glob == 'A':
IntLoc = IntLoc - 1
IntParIO = IntLoc - IntGlob
EnumLoc = Ident1
if EnumLoc == Ident1:
break
return IntParIO
def Proc3(PtrParOut):
global IntGlob
if PtrGlb is not None:
PtrParOut = PtrGlb.PtrComp
else:
IntGlob = 100
PtrGlb.IntComp = Proc7(10, IntGlob)
return PtrParOut
def Proc4():
global Char2Glob
BoolLoc = Char1Glob == 'A'
BoolLoc = BoolLoc or BoolGlob
Char2Glob = 'B'
def Proc5():
global Char1Glob
global BoolGlob
Char1Glob = 'A'
BoolGlob = FALSE
def Proc6(EnumParIn):
EnumParOut = EnumParIn
if not Func3(EnumParIn):
EnumParOut = Ident4
if EnumParIn == Ident1:
EnumParOut = Ident1
elif EnumParIn == Ident2:
if IntGlob > 100:
EnumParOut = Ident1
else:
EnumParOut = Ident4
elif EnumParIn == Ident3:
EnumParOut = Ident2
elif EnumParIn == Ident4:
pass
elif EnumParIn == Ident5:
EnumParOut = Ident3
return EnumParOut
def Proc7(IntParI1, IntParI2):
IntLoc = IntParI1 + 2
IntParOut = IntParI2 + IntLoc
return IntParOut
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
global IntGlob
IntLoc = IntParI1 + 5
Array1Par[IntLoc] = IntParI2
Array1Par[IntLoc+1] = Array1Par[IntLoc]
Array1Par[IntLoc+30] = IntLoc
for IntIndex in range(IntLoc, IntLoc+2):
Array2Par[IntLoc][IntIndex] = IntLoc
Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
IntGlob = 5
def Func1(CharPar1, CharPar2):
CharLoc1 = CharPar1
CharLoc2 = CharLoc1
if CharLoc2 != CharPar2:
return Ident1
else:
return Ident2
def Func2(StrParI1, StrParI2):
IntLoc = 1
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE
def Func3(EnumParIn):
EnumLoc = EnumParIn
if EnumLoc == Ident3: return TRUE
return FALSE
if __name__ == '__main__':
import sys
def error(msg):
print >>sys.stderr, msg,
print >>sys.stderr, "usage: %s [number_of_loops]" % sys.argv[0]
sys.exit(100)
nargs = len(sys.argv) - 1
if nargs > 1:
error("%d arguments are too many;" % nargs)
elif nargs == 1:
try: loops = int(sys.argv[1])
except ValueError:
error("Invalid argument %r;" % sys.argv[1])
else:
loops = LOOPS
main(loops)
|
dd00/commandergenius
|
refs/heads/dd00
|
project/jni/python/src/Lib/bsddb/__init__.py
|
39
|
#----------------------------------------------------------------------
# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
# and Andrew Kuchling. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# o Redistributions of source code must retain the above copyright
# notice, this list of conditions, and the disclaimer that follows.
#
# o Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# o Neither the name of Digital Creations nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#----------------------------------------------------------------------
"""Support for Berkeley DB 4.0 through 4.7 with a simple interface.
For the full featured object oriented interface use the bsddb.db module
instead. It mirrors the Oracle Berkeley DB C API.
"""
import sys
absolute_import = (sys.version_info[0] >= 3)
if sys.py3kwarning:
import warnings
warnings.warnpy3k("in 3.x, bsddb has been removed; "
"please use the pybsddb project instead",
DeprecationWarning, 2)
try:
if __name__ == 'bsddb3':
        # import the _pybsddb binary, as it should be a more recent version
        # from the standalone pybsddb add-on package than the version included
        # with Python as bsddb._bsddb.
if absolute_import :
            # Because this syntax is not valid before Python 2.5
exec("from . import _pybsddb")
else :
import _pybsddb
_bsddb = _pybsddb
from bsddb3.dbutils import DeadlockWrap as _DeadlockWrap
else:
import _bsddb
from bsddb.dbutils import DeadlockWrap as _DeadlockWrap
except ImportError:
# Remove ourselves from sys.modules
import sys
del sys.modules[__name__]
raise
# bsddb3 calls it db, but provide _db for backwards compatibility
db = _db = _bsddb
__version__ = db.__version__
error = db.DBError # So bsddb.error will mean something...
#----------------------------------------------------------------------
import sys, os
from weakref import ref
if sys.version_info[0:2] <= (2, 5) :
import UserDict
MutableMapping = UserDict.DictMixin
else :
import collections
MutableMapping = collections.MutableMapping
class _iter_mixin(MutableMapping):
def _make_iter_cursor(self):
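        # Track each cursor through a weak reference so _closeCursors() can
        # close any that are still alive; the weakref callback drops the entry
        # once the cursor itself has been garbage collected.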
cur = _DeadlockWrap(self.db.cursor)
key = id(cur)
self._cursor_refs[key] = ref(cur, self._gen_cref_cleaner(key))
return cur
def _gen_cref_cleaner(self, key):
        # generate the function for the weakref callback here
        # to ensure that we do not hold a strong reference to cur
# in the callback.
return lambda ref: self._cursor_refs.pop(key, None)
def __iter__(self):
self._kill_iteration = False
self._in_iter += 1
try:
try:
cur = self._make_iter_cursor()
# FIXME-20031102-greg: race condition. cursor could
# be closed by another thread before this call.
# since we're only returning keys, we call the cursor
# methods with flags=0, dlen=0, dofs=0
key = _DeadlockWrap(cur.first, 0,0,0)[0]
yield key
next = getattr(cur, "next")
while 1:
try:
key = _DeadlockWrap(next, 0,0,0)[0]
yield key
except _bsddb.DBCursorClosedError:
if self._kill_iteration:
raise RuntimeError('Database changed size '
'during iteration.')
cur = self._make_iter_cursor()
# FIXME-20031101-greg: race condition. cursor could
# be closed by another thread before this call.
_DeadlockWrap(cur.set, key,0,0,0)
next = getattr(cur, "next")
except _bsddb.DBNotFoundError:
pass
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
pass
        # When Python 2.3 is no longer supported in bsddb3, we can change this to "finally"
except :
self._in_iter -= 1
raise
self._in_iter -= 1
def iteritems(self):
if not self.db:
return
self._kill_iteration = False
self._in_iter += 1
try:
try:
cur = self._make_iter_cursor()
# FIXME-20031102-greg: race condition. cursor could
# be closed by another thread before this call.
kv = _DeadlockWrap(cur.first)
key = kv[0]
yield kv
next = getattr(cur, "next")
while 1:
try:
kv = _DeadlockWrap(next)
key = kv[0]
yield kv
except _bsddb.DBCursorClosedError:
if self._kill_iteration:
raise RuntimeError('Database changed size '
'during iteration.')
cur = self._make_iter_cursor()
# FIXME-20031101-greg: race condition. cursor could
# be closed by another thread before this call.
_DeadlockWrap(cur.set, key,0,0,0)
next = getattr(cur, "next")
except _bsddb.DBNotFoundError:
pass
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
pass
        # When Python 2.3 is no longer supported in bsddb3, we can change this to "finally"
except :
self._in_iter -= 1
raise
self._in_iter -= 1
class _DBWithCursor(_iter_mixin):
"""
A simple wrapper around DB that makes it look like the bsddbobject in
the old module. It uses a cursor as needed to provide DB traversal.
"""
def __init__(self, db):
self.db = db
self.db.set_get_returns_none(0)
# FIXME-20031101-greg: I believe there is still the potential
# for deadlocks in a multithreaded environment if someone
        # attempts to use any of the cursor interfaces in one
# thread while doing a put or delete in another thread. The
# reason is that _checkCursor and _closeCursors are not atomic
# operations. Doing our own locking around self.dbc,
# self.saved_dbc_key and self._cursor_refs could prevent this.
# TODO: A test case demonstrating the problem needs to be written.
# self.dbc is a DBCursor object used to implement the
# first/next/previous/last/set_location methods.
self.dbc = None
self.saved_dbc_key = None
# a collection of all DBCursor objects currently allocated
# by the _iter_mixin interface.
self._cursor_refs = {}
self._in_iter = 0
self._kill_iteration = False
def __del__(self):
self.close()
def _checkCursor(self):
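        # Lazily (re)create the shared cursor and, if _closeCursors() saved a
        # position, restore the cursor to it.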
if self.dbc is None:
self.dbc = _DeadlockWrap(self.db.cursor)
if self.saved_dbc_key is not None:
_DeadlockWrap(self.dbc.set, self.saved_dbc_key)
self.saved_dbc_key = None
# This method is needed for all non-cursor DB calls to avoid
# Berkeley DB deadlocks (due to being opened with DB_INIT_LOCK
# and DB_THREAD to be thread safe) when intermixing database
# operations that use the cursor internally with those that don't.
def _closeCursors(self, save=1):
if self.dbc:
c = self.dbc
self.dbc = None
if save:
try:
self.saved_dbc_key = _DeadlockWrap(c.current, 0,0,0)[0]
except db.DBError:
pass
_DeadlockWrap(c.close)
del c
for cref in self._cursor_refs.values():
c = cref()
if c is not None:
_DeadlockWrap(c.close)
def _checkOpen(self):
if self.db is None:
raise error, "BSDDB object has already been closed"
def isOpen(self):
return self.db is not None
def __len__(self):
self._checkOpen()
return _DeadlockWrap(lambda: len(self.db)) # len(self.db)
if sys.version_info[0:2] >= (2, 6) :
def __repr__(self) :
if self.isOpen() :
return repr(dict(_DeadlockWrap(self.db.items)))
return repr(dict())
def __getitem__(self, key):
self._checkOpen()
return _DeadlockWrap(lambda: self.db[key]) # self.db[key]
def __setitem__(self, key, value):
self._checkOpen()
self._closeCursors()
if self._in_iter and key not in self:
self._kill_iteration = True
def wrapF():
self.db[key] = value
_DeadlockWrap(wrapF) # self.db[key] = value
def __delitem__(self, key):
self._checkOpen()
self._closeCursors()
if self._in_iter and key in self:
self._kill_iteration = True
def wrapF():
del self.db[key]
_DeadlockWrap(wrapF) # del self.db[key]
def close(self):
self._closeCursors(save=0)
if self.dbc is not None:
_DeadlockWrap(self.dbc.close)
v = 0
if self.db is not None:
v = _DeadlockWrap(self.db.close)
self.dbc = None
self.db = None
return v
def keys(self):
self._checkOpen()
return _DeadlockWrap(self.db.keys)
def has_key(self, key):
self._checkOpen()
return _DeadlockWrap(self.db.has_key, key)
def set_location(self, key):
self._checkOpen()
self._checkCursor()
return _DeadlockWrap(self.dbc.set_range, key)
def next(self): # Renamed by "2to3"
self._checkOpen()
self._checkCursor()
rv = _DeadlockWrap(getattr(self.dbc, "next"))
return rv
if sys.version_info[0] >= 3 : # For "2to3" conversion
next = __next__
def previous(self):
self._checkOpen()
self._checkCursor()
rv = _DeadlockWrap(self.dbc.prev)
return rv
def first(self):
self._checkOpen()
# fix 1725856: don't needlessly try to restore our cursor position
self.saved_dbc_key = None
self._checkCursor()
rv = _DeadlockWrap(self.dbc.first)
return rv
def last(self):
self._checkOpen()
# fix 1725856: don't needlessly try to restore our cursor position
self.saved_dbc_key = None
self._checkCursor()
rv = _DeadlockWrap(self.dbc.last)
return rv
def sync(self):
self._checkOpen()
return _DeadlockWrap(self.db.sync)
#----------------------------------------------------------------------
# Compatibility object factory functions
def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None,
cachesize=None, lorder=None, hflags=0):
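    # Open (or create, depending on 'flag') a DB_HASH database inside a private
    # DBEnv and return it wrapped in the dict-like _DBWithCursor interface.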
flags = _checkflag(flag, file)
e = _openDBEnv(cachesize)
d = db.DB(e)
d.set_flags(hflags)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
if ffactor is not None: d.set_h_ffactor(ffactor)
if nelem is not None: d.set_h_nelem(nelem)
d.open(file, db.DB_HASH, flags, mode)
return _DBWithCursor(d)
#----------------------------------------------------------------------
def btopen(file, flag='c', mode=0666,
btflags=0, cachesize=None, maxkeypage=None, minkeypage=None,
pgsize=None, lorder=None):
flags = _checkflag(flag, file)
e = _openDBEnv(cachesize)
d = db.DB(e)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
d.set_flags(btflags)
if minkeypage is not None: d.set_bt_minkey(minkeypage)
if maxkeypage is not None: d.set_bt_maxkey(maxkeypage)
d.open(file, db.DB_BTREE, flags, mode)
return _DBWithCursor(d)
#----------------------------------------------------------------------
def rnopen(file, flag='c', mode=0666,
rnflags=0, cachesize=None, pgsize=None, lorder=None,
rlen=None, delim=None, source=None, pad=None):
flags = _checkflag(flag, file)
e = _openDBEnv(cachesize)
d = db.DB(e)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
d.set_flags(rnflags)
if delim is not None: d.set_re_delim(delim)
if rlen is not None: d.set_re_len(rlen)
if source is not None: d.set_re_source(source)
if pad is not None: d.set_re_pad(pad)
d.open(file, db.DB_RECNO, flags, mode)
return _DBWithCursor(d)
#----------------------------------------------------------------------
def _openDBEnv(cachesize):
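    # Build a private, thread-safe DBEnv with locking and a shared memory pool;
    # cache sizes below 20480 bytes are rejected.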
e = db.DBEnv()
if cachesize is not None:
if cachesize >= 20480:
e.set_cachesize(0, cachesize)
else:
raise error, "cachesize must be >= 20480"
e.set_lk_detect(db.DB_LOCK_DEFAULT)
e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
return e
def _checkflag(flag, file):
if flag == 'r':
flags = db.DB_RDONLY
elif flag == 'rw':
flags = 0
elif flag == 'w':
flags = db.DB_CREATE
elif flag == 'c':
flags = db.DB_CREATE
elif flag == 'n':
flags = db.DB_CREATE
#flags = db.DB_CREATE | db.DB_TRUNCATE
        # we used the db.DB_TRUNCATE flag for this before, but Berkeley DB
        # 4.2.52 changed to disallow truncate with txn environments.
if file is not None and os.path.isfile(file):
os.unlink(file)
else:
raise error, "flags should be one of 'r', 'w', 'c' or 'n'"
return flags | db.DB_THREAD
#----------------------------------------------------------------------
# This is a silly little hack that allows apps to continue to use the
# DB_THREAD flag even on systems without threads without freaking out
# Berkeley DB.
#
# This assumes that if Python was built with thread support then
# Berkeley DB was too.
try:
import thread
del thread
except ImportError:
db.DB_THREAD = 0
#----------------------------------------------------------------------
|
KrzysiekJ/django-terminator
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
from setuptools import setup
VERSION = '0.1.1'
def read(file_name):
with open(file_name, 'r') as f:
return f.read()
setup(
name='django-terminator',
description='''One time method executor for Django models''',
long_description=read('README.rst'),
version=str(VERSION),
author='Krzysztof Jurewicz',
author_email='krzysztof.jurewicz@gmail.com',
url='http://github.com/KrzysiekJ/django-terminator',
packages=['terminator'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
)
|
zanderle/django
|
refs/heads/master
|
tests/admin_changelist/urls.py
|
810
|
from django.conf.urls import url
from . import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
Greennut/ostproject
|
refs/heads/master
|
django/contrib/localflavor/de/de_states.py
|
544
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
STATE_CHOICES = (
('BW', _('Baden-Wuerttemberg')),
('BY', _('Bavaria')),
('BE', _('Berlin')),
('BB', _('Brandenburg')),
('HB', _('Bremen')),
('HH', _('Hamburg')),
('HE', _('Hessen')),
('MV', _('Mecklenburg-Western Pomerania')),
('NI', _('Lower Saxony')),
('NW', _('North Rhine-Westphalia')),
('RP', _('Rhineland-Palatinate')),
('SL', _('Saarland')),
('SN', _('Saxony')),
('ST', _('Saxony-Anhalt')),
('SH', _('Schleswig-Holstein')),
('TH', _('Thuringia')),
)
|
sergiorua/libcloud
|
refs/heads/trunk
|
docs/examples/compute/cloudsigma/list_sizes_images_drives.py
|
63
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
cls = get_driver(Provider.CLOUDSIGMA)
driver = cls('username', 'password', region='zrh', api_version='2.0')
sizes = driver.list_sizes()
print(sizes)
images = driver.list_images()
print(images)
drives = driver.ex_list_library_drives()
print(drives)
|
zzzeek/sqlalchemy
|
refs/heads/master
|
lib/sqlalchemy/orm/collections.py
|
3
|
# orm/collections.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for collections of mapped entities.
The collections package supplies the machinery used to inform the ORM of
collection membership changes. An instrumentation via decoration approach is
used, allowing arbitrary types (including built-ins) to be used as entity
collections without requiring inheritance from a base class.
Instrumentation decoration relays membership change events to the
:class:`.CollectionAttributeImpl` that is currently managing the collection.
The decorators observe function call arguments and return values, tracking
entities entering or leaving the collection. Two decorator approaches are
provided. One is a bundle of generic decorators that map function arguments
and return values to events::
from sqlalchemy.orm.collections import collection
class MyClass(object):
# ...
@collection.adds(1)
def store(self, item):
self.data.append(item)
@collection.removes_return()
def pop(self):
return self.data.pop()
The second approach is a bundle of targeted decorators that wrap appropriate
append and remove notifiers around the mutation methods present in the
standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
specified in terms of generic decorator recipes, but are instead hand-tooled
for increased efficiency. The targeted decorators occasionally implement
adapter-like behavior, such as mapping bulk-set methods (``extend``,
``update``, ``__setslice__``, etc.) into the series of atomic mutation events
that the ORM requires.
The targeted decorators are used internally for automatic instrumentation of
entity collection classes. Every collection class goes through a
transformation process roughly like so:
1. If the class is a built-in, substitute a trivial sub-class
2. Is this class already instrumented?
3. Add in generic decorators
4. Sniff out the collection interface through duck-typing
5. Add targeted decoration to any undecorated interface method
This process modifies the class at runtime, decorating methods and adding some
bookkeeping properties. This isn't possible (or desirable) for built-in
classes like ``list``, so trivial sub-classes are substituted to hold
decoration::
class InstrumentedList(list):
pass
Collection classes can be specified in ``relationship(collection_class=)`` as
types or a function that returns an instance. Collection classes are
inspected and instrumented during the mapper compilation phase. The
collection_class callable will be executed once to produce a specimen
instance, and the type of that specimen will be instrumented. Functions that
return built-in types like ``lists`` will be adapted to produce instrumented
instances.
When extending a known type like ``list``, additional decorations are
generally not needed. Odds are, the extension method will delegate to a
method that's already instrumented. For example::
class QueueIsh(list):
def push(self, item):
self.append(item)
def shift(self):
return self.pop(0)
There's no need to decorate these methods. ``append`` and ``pop`` are already
instrumented as part of the ``list`` interface. Decorating them would fire
duplicate events, which should be avoided.
The targeted decoration tries not to rely on other methods in the underlying
collection class, but some are unavoidable. Many depend on 'read' methods
being present to properly instrument a 'write', for example, ``__setitem__``
needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also
be reimplemented in terms of atomic appends and removes, so the ``extend``
decoration will actually perform many ``append`` operations and not call the
underlying method at all.
Tight control over bulk operation and the firing of events is also possible by
implementing the instrumentation internally in your methods. The basic
instrumentation package works under the general assumption that collection
mutation will not raise unusual exceptions. If you want to closely
orchestrate append and remove events with exception management, internal
instrumentation may be the answer. Within your method,
``collection_adapter(self)`` will retrieve an object that you can use for
explicit control over triggering append and remove events.
The owning object and :class:`.CollectionAttributeImpl` are also reachable
through the adapter, allowing for some very sophisticated behavior.
"""
import operator
import weakref
from sqlalchemy.util.compat import inspect_getfullargspec
from . import base
from .. import exc as sa_exc
from .. import util
from ..sql import coercions
from ..sql import expression
from ..sql import roles
__all__ = [
"collection",
"collection_adapter",
"mapped_collection",
"column_mapped_collection",
"attribute_mapped_collection",
]
__instrumentation_mutex = util.threading.Lock()
class _PlainColumnGetter(object):
"""Plain column getter, stores collection of Column objects
directly.
Serializes to a :class:`._SerializableColumnGetterV2`
which has more expensive __call__() performance
and some rare caveats.
"""
def __init__(self, cols):
self.cols = cols
self.composite = len(cols) > 1
def __reduce__(self):
return _SerializableColumnGetterV2._reduce_from_cols(self.cols)
def _cols(self, mapper):
return self.cols
def __call__(self, value):
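        # Resolve the mapped column value(s) for the given instance; composite
        # keys are returned as tuples, single-column keys as a scalar.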
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(state, state.dict, col)
for col in self._cols(m)
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetter(object):
"""Column-based getter used in version 0.7.6 only.
Remains here for pickle compatibility with 0.7.6.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return _SerializableColumnGetter, (self.colkeys,)
def __call__(self, value):
state = base.instance_state(value)
m = base._state_mapper(state)
key = [
m._get_state_attr_by_column(
state, state.dict, m.mapped_table.columns[k]
)
for k in self.colkeys
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetterV2(_PlainColumnGetter):
"""Updated serializable getter which deals with
multi-table mapped classes.
Two extremely unusual cases are not supported.
Mappings which have tables across multiple metadata
objects, or which are mapped to non-Table selectables
linked across inheriting mappers may fail to function
here.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return self.__class__, (self.colkeys,)
@classmethod
def _reduce_from_cols(cls, cols):
def _table_key(c):
if not isinstance(c.table, expression.TableClause):
return None
else:
return c.table.key
colkeys = [(c.key, _table_key(c)) for c in cols]
return _SerializableColumnGetterV2, (colkeys,)
def _cols(self, mapper):
cols = []
metadata = getattr(mapper.local_table, "metadata", None)
for (ckey, tkey) in self.colkeys:
if tkey is None or metadata is None or tkey not in metadata:
cols.append(mapper.local_table.c[ckey])
else:
cols.append(metadata.tables[tkey].c[ckey])
return cols
def column_mapped_collection(mapping_spec):
"""A dictionary-based collection type with column-based keying.
Returns a :class:`.MappedCollection` factory with a keying function
generated from mapping_spec, which may be a Column or a sequence
of Columns.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
"""
cols = [
coercions.expect(roles.ColumnArgumentRole, q, argname="mapping_spec")
for q in util.to_list(mapping_spec)
]
keyfunc = _PlainColumnGetter(cols)
return lambda: MappedCollection(keyfunc)
class _SerializableAttrGetter(object):
def __init__(self, name):
self.name = name
self.getter = operator.attrgetter(name)
def __call__(self, target):
return self.getter(target)
def __reduce__(self):
return _SerializableAttrGetter, (self.name,)
def attribute_mapped_collection(attr_name):
"""A dictionary-based collection type with attribute-based keying.
Returns a :class:`.MappedCollection` factory with a keying based on the
'attr_name' attribute of entities in the collection, where ``attr_name``
is the string name of the attribute.
.. warning:: the key value must be assigned to its final value
**before** it is accessed by the attribute mapped collection.
Additionally, changes to the key attribute are **not tracked**
automatically, which means the key in the dictionary is not
automatically synchronized with the key value on the target object
itself. See the section :ref:`key_collections_mutations`
for an example.
"""
getter = _SerializableAttrGetter(attr_name)
return lambda: MappedCollection(getter)
def mapped_collection(keyfunc):
"""A dictionary-based collection type with arbitrary keying.
Returns a :class:`.MappedCollection` factory with a keying function
generated from keyfunc, a callable that takes an entity and returns a
key value.
The key value must be immutable for the lifetime of the object. You
can not, for example, map on foreign key values if those key values will
change during the session, i.e. from None to a database-assigned integer
after a session flush.
"""
return lambda: MappedCollection(keyfunc)
class collection(object):
"""Decorators for entity collection classes.
The decorators fall into two groups: annotations and interception recipes.
The annotating decorators (appender, remover, iterator, converter,
internally_instrumented) indicate the method's purpose and take no
arguments. They are not written with parens::
@collection.appender
def append(self, append): ...
The recipe decorators all require parens, even those that take no
arguments::
@collection.adds('entity')
def insert(self, position, entity): ...
@collection.removes_return()
def popitem(self): ...
"""
# Bundled as a class solely for ease of use: packaging, doc strings,
# importability.
@staticmethod
def appender(fn):
"""Tag the method as the collection appender.
The appender method is called with one positional argument: the value
to append. The method will be automatically decorated with 'adds(1)'
if not already decorated::
@collection.appender
def add(self, append): ...
# or, equivalently
@collection.appender
@collection.adds(1)
def add(self, append): ...
# for mapping type, an 'append' may kick out a previous value
# that occupies that slot. consider d['a'] = 'foo'- any previous
# value in d['a'] is discarded.
@collection.appender
@collection.replaces(1)
def add(self, entity):
key = some_key_func(entity)
previous = None
if key in self:
previous = self[key]
self[key] = entity
return previous
If the value to append is not allowed in the collection, you may
raise an exception. Something to remember is that the appender
will be called for each object mapped by a database query. If the
database contains rows that violate your collection semantics, you
will need to get creative to fix the problem, as access via the
collection will not work.
If the appender method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "appender"
return fn
@staticmethod
def remover(fn):
"""Tag the method as the collection remover.
The remover method is called with one positional argument: the value
to remove. The method will be automatically decorated with
:meth:`removes_return` if not already decorated::
@collection.remover
def zap(self, entity): ...
# or, equivalently
@collection.remover
@collection.removes_return()
def zap(self, ): ...
If the value to remove is not present in the collection, you may
raise an exception or return None to ignore the error.
If the remove method is internally instrumented, you must also
receive the keyword argument '_sa_initiator' and ensure its
promulgation to collection events.
"""
fn._sa_instrument_role = "remover"
return fn
@staticmethod
def iterator(fn):
"""Tag the method as the collection remover.
The iterator method is called with no arguments. It is expected to
return an iterator over all collection members::
@collection.iterator
def __iter__(self): ...
"""
fn._sa_instrument_role = "iterator"
return fn
@staticmethod
def internally_instrumented(fn):
"""Tag the method as instrumented.
This tag will prevent any decoration from being applied to the
method. Use this if you are orchestrating your own calls to
:func:`.collection_adapter` in one of the basic SQLAlchemy
interface methods, or to prevent an automatic ABC method
decoration from wrapping your implementation::
# normally an 'extend' method on a list-like class would be
# automatically intercepted and re-implemented in terms of
# SQLAlchemy events and append(). your implementation will
# never be called, unless:
@collection.internally_instrumented
def extend(self, items): ...
"""
fn._sa_instrumented = True
return fn
@staticmethod
@util.deprecated(
"1.3",
"The :meth:`.collection.converter` handler is deprecated and will "
"be removed in a future release. Please refer to the "
":class:`.AttributeEvents.bulk_replace` listener interface in "
"conjunction with the :func:`.event.listen` function.",
)
def converter(fn):
"""Tag the method as the collection converter.
This optional method will be called when a collection is being
replaced entirely, as in::
myobj.acollection = [newvalue1, newvalue2]
The converter method will receive the object being assigned and should
return an iterable of values suitable for use by the ``appender``
        method. A converter must not assign values or mutate the collection;
its sole job is to adapt the value the user provides into an iterable
of values for the ORM's use.
The default converter implementation will use duck-typing to do the
        conversion. A dict-like collection will be converted into an iterable
of dictionary values, and other types will simply be iterated::
@collection.converter
def convert(self, other): ...
If the duck-typing of the object does not match the type of this
collection, a TypeError is raised.
Supply an implementation of this method if you want to expand the
range of possible types that can be assigned in bulk or perform
validation on the values about to be assigned.
"""
fn._sa_instrument_role = "converter"
return fn
@staticmethod
def adds(arg):
"""Mark the method as adding an entity to the collection.
Adds "add to collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value. Arguments can be specified positionally (i.e. integer) or by
name::
@collection.adds(1)
def push(self, item): ...
@collection.adds('entity')
def do_stuff(self, thing, entity=None): ...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
return fn
return decorator
@staticmethod
def replaces(arg):
"""Mark the method as replacing an entity in the collection.
Adds "add to collection" and "remove from collection" handling to
the method. The decorator argument indicates which method argument
holds the SQLAlchemy-relevant value to be added, and return value, if
any will be considered the value to remove.
Arguments can be specified positionally (i.e. integer) or by name::
@collection.replaces(2)
def __setitem__(self, index, item): ...
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_append_event", arg)
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
@staticmethod
def removes(arg):
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The decorator
argument indicates which method argument holds the SQLAlchemy-relevant
value to be removed. Arguments can be specified positionally (i.e.
integer) or by name::
@collection.removes(1)
def zap(self, item): ...
For methods where the value to remove is not known at call-time, use
collection.removes_return.
"""
def decorator(fn):
fn._sa_instrument_before = ("fire_remove_event", arg)
return fn
return decorator
@staticmethod
def removes_return():
"""Mark the method as removing an entity in the collection.
Adds "remove from collection" handling to the method. The return
value of the method, if any, is considered the value to remove. The
method arguments are not inspected::
@collection.removes_return()
def pop(self): ...
For methods where the value to remove is known at call-time, use
        collection.removes.
"""
def decorator(fn):
fn._sa_instrument_after = "fire_remove_event"
return fn
return decorator
collection_adapter = operator.attrgetter("_sa_adapter")
"""Fetch the :class:`.CollectionAdapter` for a collection."""
class CollectionAdapter(object):
"""Bridges between the ORM and arbitrary Python collections.
Proxies base-level collection operations (append, remove, iterate)
to the underlying Python collection, and emits add/remove events for
entities entering or leaving the collection.
The ORM uses :class:`.CollectionAdapter` exclusively for interaction with
entity collections.
"""
__slots__ = (
"attr",
"_key",
"_data",
"owner_state",
"_converter",
"invalidated",
"empty",
)
def __init__(self, attr, owner_state, data):
self.attr = attr
self._key = attr.key
self._data = weakref.ref(data)
self.owner_state = owner_state
data._sa_adapter = self
self._converter = data._sa_converter
self.invalidated = False
self.empty = False
def _warn_invalidated(self):
util.warn("This collection has been invalidated.")
@property
def data(self):
"The entity collection being adapted."
return self._data()
@property
def _referenced_by_owner(self):
"""return True if the owner state still refers to this collection.
This will return False within a bulk replace operation,
where this collection is the one being replaced.
"""
return self.owner_state.dict[self._key] is self._data()
def bulk_appender(self):
return self._data()._sa_appender
def append_with_event(self, item, initiator=None):
"""Add an entity to the collection, firing mutation events."""
self._data()._sa_appender(item, _sa_initiator=initiator)
def _set_empty(self, user_data):
assert (
not self.empty
), "This collection adapter is already in the 'empty' state"
self.empty = True
self.owner_state._empty_collections[self._key] = user_data
def _reset_empty(self):
assert (
self.empty
), "This collection adapter is not in the 'empty' state"
self.empty = False
self.owner_state.dict[
self._key
] = self.owner_state._empty_collections.pop(self._key)
def _refuse_empty(self):
raise sa_exc.InvalidRequestError(
"This is a special 'empty' collection which cannot accommodate "
"internal mutation operations"
)
def append_without_event(self, item):
"""Add or restore an entity to the collection, firing no events."""
if self.empty:
self._refuse_empty()
self._data()._sa_appender(item, _sa_initiator=False)
def append_multiple_without_event(self, items):
"""Add or restore an entity to the collection, firing no events."""
if self.empty:
self._refuse_empty()
appender = self._data()._sa_appender
for item in items:
appender(item, _sa_initiator=False)
def bulk_remover(self):
return self._data()._sa_remover
def remove_with_event(self, item, initiator=None):
"""Remove an entity from the collection, firing mutation events."""
self._data()._sa_remover(item, _sa_initiator=initiator)
def remove_without_event(self, item):
"""Remove an entity from the collection, firing no events."""
if self.empty:
self._refuse_empty()
self._data()._sa_remover(item, _sa_initiator=False)
def clear_with_event(self, initiator=None):
"""Empty the collection, firing a mutation event for each entity."""
if self.empty:
self._refuse_empty()
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=initiator)
def clear_without_event(self):
"""Empty the collection, firing no events."""
if self.empty:
self._refuse_empty()
remover = self._data()._sa_remover
for item in list(self):
remover(item, _sa_initiator=False)
def __iter__(self):
"""Iterate over entities in the collection."""
return iter(self._data()._sa_iterator())
def __len__(self):
"""Count entities in the collection."""
return len(list(self._data()._sa_iterator()))
def __bool__(self):
return True
__nonzero__ = __bool__
def fire_append_wo_mutation_event(self, item, initiator=None):
"""Notify that a entity is entering the collection but is already
present.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
.. versionadded:: 1.4.15
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
return self.attr.fire_append_wo_mutation_event(
self.owner_state, self.owner_state.dict, item, initiator
)
else:
return item
def fire_append_event(self, item, initiator=None):
"""Notify that a entity has entered the collection.
Initiator is a token owned by the InstrumentedAttribute that
initiated the membership mutation, and should be left as None
unless you are passing along an initiator value from a chained
operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
return self.attr.fire_append_event(
self.owner_state, self.owner_state.dict, item, initiator
)
else:
return item
def fire_remove_event(self, item, initiator=None):
"""Notify that a entity has been removed from the collection.
Initiator is the InstrumentedAttribute that initiated the membership
mutation, and should be left as None unless you are passing along
an initiator value from a chained operation.
"""
if initiator is not False:
if self.invalidated:
self._warn_invalidated()
if self.empty:
self._reset_empty()
self.attr.fire_remove_event(
self.owner_state, self.owner_state.dict, item, initiator
)
def fire_pre_remove_event(self, initiator=None):
"""Notify that an entity is about to be removed from the collection.
Only called if the entity cannot be removed after calling
fire_remove_event().
"""
if self.invalidated:
self._warn_invalidated()
self.attr.fire_pre_remove_event(
self.owner_state, self.owner_state.dict, initiator=initiator
)
def __getstate__(self):
return {
"key": self._key,
"owner_state": self.owner_state,
"owner_cls": self.owner_state.class_,
"data": self.data,
"invalidated": self.invalidated,
"empty": self.empty,
}
def __setstate__(self, d):
self._key = d["key"]
self.owner_state = d["owner_state"]
self._data = weakref.ref(d["data"])
self._converter = d["data"]._sa_converter
d["data"]._sa_adapter = self
self.invalidated = d["invalidated"]
self.attr = getattr(d["owner_cls"], self._key).impl
self.empty = d.get("empty", False)
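# Illustrative sketch, not part of the upstream module: the adapter for an
# instrumented collection instance is reached through ``collection_adapter``
# defined above.  ``instrumented_collection`` is assumed to be a
# relationship-backed collection that has already been adapted.
def _demo_adapter_lookup(instrumented_collection):
    adapter = collection_adapter(instrumented_collection)  # a CollectionAdapter
    return adapter.data is instrumented_collection  # True: the adapter wraps it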
def bulk_replace(values, existing_adapter, new_adapter, initiator=None):
"""Load a new collection, firing events based on prior like membership.
Appends instances in ``values`` onto the ``new_adapter``. Events will be
fired for any instance not present in the ``existing_adapter``. Any
instances in ``existing_adapter`` not present in ``values`` will have
remove events fired upon them.
:param values: An iterable of collection member instances
:param existing_adapter: A :class:`.CollectionAdapter` of
instances to be replaced
:param new_adapter: An empty :class:`.CollectionAdapter`
to load with ``values``
"""
assert isinstance(values, list)
idset = util.IdentitySet
existing_idset = idset(existing_adapter or ())
constants = existing_idset.intersection(values or ())
additions = idset(values or ()).difference(constants)
removals = existing_idset.difference(constants)
appender = new_adapter.bulk_appender()
for member in values or ():
if member in additions:
appender(member, _sa_initiator=initiator)
elif member in constants:
appender(member, _sa_initiator=False)
if existing_adapter:
for member in removals:
existing_adapter.fire_remove_event(member, initiator=initiator)
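# Illustrative sketch, not part of the upstream module: the partition that
# bulk_replace performs above, isolated as a hypothetical helper.  Membership
# is decided by object identity, hence util.IdentitySet rather than ordinary
# __eq__/__hash__-based set arithmetic.
def _demo_bulk_replace_partition(existing_members, new_values):
    """Return (constants, additions, removals) the way bulk_replace
    classifies members: constants fire no events, additions fire append
    events, removals fire remove events."""
    existing_idset = util.IdentitySet(existing_members)
    constants = existing_idset.intersection(new_values)
    additions = util.IdentitySet(new_values).difference(constants)
    removals = existing_idset.difference(constants)
    return constants, additions, removals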
def prepare_instrumentation(factory):
"""Prepare a callable for future use as a collection class factory.
Given a collection class factory (either a type or no-arg callable),
return another factory that will produce compatible instances when
called.
This function is responsible for converting collection_class=list
into the run-time behavior of collection_class=InstrumentedList.
"""
# Convert a builtin to 'Instrumented*'
if factory in __canned_instrumentation:
factory = __canned_instrumentation[factory]
# Create a specimen
cls = type(factory())
# Did factory callable return a builtin?
if cls in __canned_instrumentation:
# Wrap it so that it returns our 'Instrumented*'
factory = __converting_factory(cls, factory)
cls = factory()
# Instrument the class if needed.
if __instrumentation_mutex.acquire():
try:
if getattr(cls, "_sa_instrumented", None) != id(cls):
_instrument_class(cls)
finally:
__instrumentation_mutex.release()
return factory
def __converting_factory(specimen_cls, original_factory):
"""Return a wrapper that converts a "canned" collection like
set, dict, list into the Instrumented* version.
"""
instrumented_cls = __canned_instrumentation[specimen_cls]
def wrapper():
collection = original_factory()
return instrumented_cls(collection)
# often flawed but better than nothing
wrapper.__name__ = "%sWrapper" % original_factory.__name__
wrapper.__doc__ = original_factory.__doc__
return wrapper
def _instrument_class(cls):
"""Modify methods in a class and install instrumentation."""
# In the normal call flow, a request for any of the 3 basic collection
# types is transformed into one of our trivial subclasses
# (e.g. InstrumentedList). Catch anything else that sneaks in here...
if cls.__module__ == "__builtin__":
raise sa_exc.ArgumentError(
"Can not instrument a built-in type. Use a "
"subclass, even a trivial one."
)
roles, methods = _locate_roles_and_methods(cls)
_setup_canned_roles(cls, roles, methods)
_assert_required_roles(cls, roles, methods)
_set_collection_attributes(cls, roles, methods)
def _locate_roles_and_methods(cls):
"""search for _sa_instrument_role-decorated methods in
method resolution order, assign to roles.
"""
roles = {}
methods = {}
for supercls in cls.__mro__:
for name, method in vars(supercls).items():
if not callable(method):
continue
# note role declarations
if hasattr(method, "_sa_instrument_role"):
role = method._sa_instrument_role
assert role in (
"appender",
"remover",
"iterator",
"converter",
)
roles.setdefault(role, name)
# transfer instrumentation requests from decorated function
# to the combined queue
before, after = None, None
if hasattr(method, "_sa_instrument_before"):
op, argument = method._sa_instrument_before
assert op in ("fire_append_event", "fire_remove_event")
before = op, argument
if hasattr(method, "_sa_instrument_after"):
op = method._sa_instrument_after
assert op in ("fire_append_event", "fire_remove_event")
after = op
if before:
methods[name] = before + (after,)
elif after:
methods[name] = None, None, after
return roles, methods
def _setup_canned_roles(cls, roles, methods):
"""see if this class has "canned" roles based on a known
collection type (dict, set, list). Apply those roles
as needed to the "roles" dictionary, and also
prepare "decorator" methods
"""
collection_type = util.duck_type_collection(cls)
if collection_type in __interfaces:
canned_roles, decorators = __interfaces[collection_type]
for role, name in canned_roles.items():
roles.setdefault(role, name)
# apply ABC auto-decoration to methods that need it
for method, decorator in decorators.items():
fn = getattr(cls, method, None)
if (
fn
and method not in methods
and not hasattr(fn, "_sa_instrumented")
):
setattr(cls, method, decorator(fn))
def _assert_required_roles(cls, roles, methods):
"""ensure all roles are present, and apply implicit instrumentation if
needed
"""
if "appender" not in roles or not hasattr(cls, roles["appender"]):
raise sa_exc.ArgumentError(
"Type %s must elect an appender method to be "
"a collection class" % cls.__name__
)
elif roles["appender"] not in methods and not hasattr(
getattr(cls, roles["appender"]), "_sa_instrumented"
):
methods[roles["appender"]] = ("fire_append_event", 1, None)
if "remover" not in roles or not hasattr(cls, roles["remover"]):
raise sa_exc.ArgumentError(
"Type %s must elect a remover method to be "
"a collection class" % cls.__name__
)
elif roles["remover"] not in methods and not hasattr(
getattr(cls, roles["remover"]), "_sa_instrumented"
):
methods[roles["remover"]] = ("fire_remove_event", 1, None)
if "iterator" not in roles or not hasattr(cls, roles["iterator"]):
raise sa_exc.ArgumentError(
"Type %s must elect an iterator method to be "
"a collection class" % cls.__name__
)
def _set_collection_attributes(cls, roles, methods):
"""apply ad-hoc instrumentation from decorators, class-level defaults
and implicit role declarations
"""
for method_name, (before, argument, after) in methods.items():
setattr(
cls,
method_name,
_instrument_membership_mutator(
getattr(cls, method_name), before, argument, after
),
)
# intern the role map
for role, method_name in roles.items():
setattr(cls, "_sa_%s" % role, getattr(cls, method_name))
cls._sa_adapter = None
if not hasattr(cls, "_sa_converter"):
cls._sa_converter = None
cls._sa_instrumented = id(cls)
def _instrument_membership_mutator(method, before, argument, after):
"""Route method args and/or return value through the collection
adapter."""
# This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))'
if before:
fn_args = list(
util.flatten_iterator(inspect_getfullargspec(method)[0])
)
if isinstance(argument, int):
pos_arg = argument
named_arg = len(fn_args) > argument and fn_args[argument] or None
else:
if argument in fn_args:
pos_arg = fn_args.index(argument)
else:
pos_arg = None
named_arg = argument
del fn_args
def wrapper(*args, **kw):
if before:
if pos_arg is None:
if named_arg not in kw:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
value = kw[named_arg]
else:
if len(args) > pos_arg:
value = args[pos_arg]
elif named_arg in kw:
value = kw[named_arg]
else:
raise sa_exc.ArgumentError(
"Missing argument %s" % argument
)
initiator = kw.pop("_sa_initiator", None)
if initiator is False:
executor = None
else:
executor = args[0]._sa_adapter
if before and executor:
getattr(executor, before)(value, initiator)
if not after or not executor:
return method(*args, **kw)
else:
res = method(*args, **kw)
if res is not None:
getattr(executor, after)(res, initiator)
return res
wrapper._sa_instrumented = True
if hasattr(method, "_sa_instrument_role"):
wrapper._sa_instrument_role = method._sa_instrument_role
wrapper.__name__ = method.__name__
wrapper.__doc__ = method.__doc__
return wrapper
def __set_wo_mutation(collection, item, _sa_initiator=None):
"""Run set wo mutation events.
The collection is not mutated.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
executor.fire_append_wo_mutation_event(item, _sa_initiator)
def __set(collection, item, _sa_initiator=None):
"""Run set events.
This event always occurs before the collection is actually mutated.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
item = executor.fire_append_event(item, _sa_initiator)
return item
def __del(collection, item, _sa_initiator=None):
"""Run del events.
This event occurs before the collection is actually mutated, *except*
in the case of a pop operation, in which case it occurs afterwards.
For pop operations, the __before_pop hook is called before the
operation occurs.
"""
if _sa_initiator is not False:
executor = collection._sa_adapter
if executor:
executor.fire_remove_event(item, _sa_initiator)
def __before_pop(collection, _sa_initiator=None):
"""An event which occurs on a before a pop() operation occurs."""
executor = collection._sa_adapter
if executor:
executor.fire_pre_remove_event(_sa_initiator)
def _list_decorators():
"""Tailored instrumentation wrappers for any list-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(list, fn.__name__).__doc__
def append(fn):
def append(self, item, _sa_initiator=None):
item = __set(self, item, _sa_initiator)
fn(self, item)
_tidy(append)
return append
def remove(fn):
def remove(self, value, _sa_initiator=None):
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__eq__
fn(self, value)
_tidy(remove)
return remove
def insert(fn):
def insert(self, index, value):
value = __set(self, value)
fn(self, index, value)
_tidy(insert)
return insert
def __setitem__(fn):
def __setitem__(self, index, value):
if not isinstance(index, slice):
existing = self[index]
if existing is not None:
__del(self, existing)
value = __set(self, value)
fn(self, index, value)
else:
# slice assignment requires __delitem__, insert, __len__
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
if index.stop is not None:
stop = index.stop
else:
stop = len(self)
if stop < 0:
stop += len(self)
if step == 1:
if value is self:
return
for i in range(start, stop, step):
if len(self) > start:
del self[start]
for i, item in enumerate(value):
self.insert(i + start, item)
else:
rng = list(range(start, stop, step))
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s"
% (len(value), len(rng))
)
for i, item in zip(rng, value):
self.__setitem__(i, item)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, index):
if not isinstance(index, slice):
item = self[index]
__del(self, item)
fn(self, index)
else:
# slice deletion requires __getslice__ and a slice-groking
# __getitem__ for stepped deletion
# note: not breaking this into atomic dels
for item in self[index]:
__del(self, item)
fn(self, index)
_tidy(__delitem__)
return __delitem__
if util.py2k:
def __setslice__(fn):
def __setslice__(self, start, end, values):
for value in self[start:end]:
__del(self, value)
values = [__set(self, value) for value in values]
fn(self, start, end, values)
_tidy(__setslice__)
return __setslice__
def __delslice__(fn):
def __delslice__(self, start, end):
for value in self[start:end]:
__del(self, value)
fn(self, start, end)
_tidy(__delslice__)
return __delslice__
def extend(fn):
def extend(self, iterable):
for value in iterable:
self.append(value)
_tidy(extend)
return extend
def __iadd__(fn):
def __iadd__(self, iterable):
# list.__iadd__ takes any iterable and seems to let TypeError
# raise as-is instead of returning NotImplemented
for value in iterable:
self.append(value)
return self
_tidy(__iadd__)
return __iadd__
def pop(fn):
def pop(self, index=-1):
__before_pop(self)
item = fn(self, index)
__del(self, item)
return item
_tidy(pop)
return pop
if not util.py2k:
def clear(fn):
def clear(self, index=-1):
for item in self:
__del(self, item)
fn(self)
_tidy(clear)
return clear
# __imul__ : not wrapping this. all members of the collection are already
# present, so no need to fire appends... wrapping it with an explicit
# decorator is still possible, so events on *= can be had if they're
# desired. hard to imagine a use case for __imul__, though.
l = locals().copy()
l.pop("_tidy")
return l
def _dict_decorators():
"""Tailored instrumentation wrappers for any dict-like mapping class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(dict, fn.__name__).__doc__
Unspecified = util.symbol("Unspecified")
def __setitem__(fn):
def __setitem__(self, key, value, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator)
value = __set(self, value, _sa_initiator)
fn(self, key, value)
_tidy(__setitem__)
return __setitem__
def __delitem__(fn):
def __delitem__(self, key, _sa_initiator=None):
if key in self:
__del(self, self[key], _sa_initiator)
fn(self, key)
_tidy(__delitem__)
return __delitem__
def clear(fn):
def clear(self):
for key in self:
__del(self, self[key])
fn(self)
_tidy(clear)
return clear
def pop(fn):
def pop(self, key, default=Unspecified):
__before_pop(self)
_to_del = key in self
if default is Unspecified:
item = fn(self, key)
else:
item = fn(self, key, default)
if _to_del:
__del(self, item)
return item
_tidy(pop)
return pop
def popitem(fn):
def popitem(self):
__before_pop(self)
item = fn(self)
__del(self, item[1])
return item
_tidy(popitem)
return popitem
def setdefault(fn):
def setdefault(self, key, default=None):
if key not in self:
self.__setitem__(key, default)
return default
else:
value = self.__getitem__(key)
if value is default:
__set_wo_mutation(self, value, None)
return value
_tidy(setdefault)
return setdefault
def update(fn):
def update(self, __other=Unspecified, **kw):
if __other is not Unspecified:
if hasattr(__other, "keys"):
for key in list(__other):
if key not in self or self[key] is not __other[key]:
self[key] = __other[key]
else:
__set_wo_mutation(self, __other[key], None)
else:
for key, value in __other:
if key not in self or self[key] is not value:
self[key] = value
else:
__set_wo_mutation(self, value, None)
for key in kw:
if key not in self or self[key] is not kw[key]:
self[key] = kw[key]
else:
__set_wo_mutation(self, kw[key], None)
_tidy(update)
return update
l = locals().copy()
l.pop("_tidy")
l.pop("Unspecified")
return l
_set_binop_bases = (set, frozenset)
def _set_binops_check_strict(self, obj):
"""Allow only set, frozenset and self.__class__-derived
objects in binops."""
return isinstance(obj, _set_binop_bases + (self.__class__,))
def _set_binops_check_loose(self, obj):
"""Allow anything set-like to participate in set binops."""
return (
isinstance(obj, _set_binop_bases + (self.__class__,))
or util.duck_type_collection(obj) == set
)
def _set_decorators():
"""Tailored instrumentation wrappers for any set-like class."""
def _tidy(fn):
fn._sa_instrumented = True
fn.__doc__ = getattr(set, fn.__name__).__doc__
Unspecified = util.symbol("Unspecified")
def add(fn):
def add(self, value, _sa_initiator=None):
if value not in self:
value = __set(self, value, _sa_initiator)
else:
__set_wo_mutation(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(add)
return add
def discard(fn):
def discard(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(discard)
return discard
def remove(fn):
def remove(self, value, _sa_initiator=None):
# testlib.pragma exempt:__hash__
if value in self:
__del(self, value, _sa_initiator)
# testlib.pragma exempt:__hash__
fn(self, value)
_tidy(remove)
return remove
def pop(fn):
def pop(self):
__before_pop(self)
item = fn(self)
# for set in particular, we have no way to access the item
# that will be popped before pop is called.
__del(self, item)
return item
_tidy(pop)
return pop
def clear(fn):
def clear(self):
for item in list(self):
self.remove(item)
_tidy(clear)
return clear
def update(fn):
def update(self, value):
for item in value:
self.add(item)
_tidy(update)
return update
def __ior__(fn):
def __ior__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.add(item)
return self
_tidy(__ior__)
return __ior__
def difference_update(fn):
def difference_update(self, value):
for item in value:
self.discard(item)
_tidy(difference_update)
return difference_update
def __isub__(fn):
def __isub__(self, value):
if not _set_binops_check_strict(self, value):
return NotImplemented
for item in value:
self.discard(item)
return self
_tidy(__isub__)
return __isub__
def intersection_update(fn):
def intersection_update(self, other):
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(intersection_update)
return intersection_update
def __iand__(fn):
def __iand__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.intersection(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__iand__)
return __iand__
def symmetric_difference_update(fn):
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
_tidy(symmetric_difference_update)
return symmetric_difference_update
def __ixor__(fn):
def __ixor__(self, other):
if not _set_binops_check_strict(self, other):
return NotImplemented
want, have = self.symmetric_difference(other), set(self)
remove, add = have - want, want - have
for item in remove:
self.remove(item)
for item in add:
self.add(item)
return self
_tidy(__ixor__)
return __ixor__
l = locals().copy()
l.pop("_tidy")
l.pop("Unspecified")
return l
class InstrumentedList(list):
"""An instrumented version of the built-in list."""
class InstrumentedSet(set):
"""An instrumented version of the built-in set."""
class InstrumentedDict(dict):
"""An instrumented version of the built-in dict."""
__canned_instrumentation = {
list: InstrumentedList,
set: InstrumentedSet,
dict: InstrumentedDict,
}
__interfaces = {
list: (
{"appender": "append", "remover": "remove", "iterator": "__iter__"},
_list_decorators(),
),
set: (
{"appender": "add", "remover": "remove", "iterator": "__iter__"},
_set_decorators(),
),
# decorators are required for dicts and object collections.
dict: ({"iterator": "values"}, _dict_decorators())
if util.py3k
else ({"iterator": "itervalues"}, _dict_decorators()),
}
class MappedCollection(dict):
"""A basic dictionary-based collection class.
Extends dict with the minimal bag semantics that collection
classes require. ``set`` and ``remove`` are implemented in terms
of a keying function: any callable that takes an object and
returns an object for use as a dictionary key.
"""
def __init__(self, keyfunc):
"""Create a new collection with keying provided by keyfunc.
keyfunc may be any callable that takes an object and returns an object
for use as a dictionary key.
The keyfunc will be called every time the ORM needs to add a member by
value-only (such as when loading instances from the database) or
        remove a member. The usual cautions about dictionary keying apply:
``keyfunc(object)`` should return the same output for the life of the
collection. Keying based on mutable properties can result in
unreachable instances "lost" in the collection.
"""
self.keyfunc = keyfunc
@collection.appender
@collection.internally_instrumented
def set(self, value, _sa_initiator=None):
"""Add an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
self.__setitem__(key, value, _sa_initiator)
@collection.remover
@collection.internally_instrumented
def remove(self, value, _sa_initiator=None):
"""Remove an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
# Let self[key] raise if key is not in this collection
# testlib.pragma exempt:__ne__
if self[key] != value:
raise sa_exc.InvalidRequestError(
"Can not remove '%s': collection holds '%s' for key '%s'. "
"Possible cause: is the MappedCollection key function "
"based on mutable properties or properties that only obtain "
"values after flush?" % (value, self[key], key)
)
self.__delitem__(key, _sa_initiator)
# ensure instrumentation is associated with
# these built-in classes; if a user-defined class
# subclasses these and uses @internally_instrumented,
# the superclass is otherwise not instrumented.
# see [ticket:2406].
_instrument_class(MappedCollection)
_instrument_class(InstrumentedList)
_instrument_class(InstrumentedSet)
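# Illustrative sketch, not part of the upstream module: building a keyed
# collection with MappedCollection.  Keying members by a ``name`` attribute is
# an assumption for illustration; in ORM use a no-arg factory like the one
# below is passed as ``collection_class`` on a relationship (the helper
# ``attribute_mapped_collection('name')`` produces an equivalent factory).
def _demo_keyed_collection_factory():
    """Hypothetical no-arg factory suitable for ``collection_class``."""
    return MappedCollection(keyfunc=operator.attrgetter("name"))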
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
refs/heads/master
|
galaxy-dist/eggs/numpy-1.6.0-py2.7-linux-x86_64-ucs4.egg/numpy/oldnumeric/functions.py
|
88
|
# Functions that should behave the same as Numeric and need changing
import numpy as np
import numpy.core.multiarray as mu
import numpy.core.numeric as nn
from typeconv import convtypecode, convtypecode2
__all__ = ['take', 'repeat', 'sum', 'product', 'sometrue', 'alltrue',
'cumsum', 'cumproduct', 'compress', 'fromfunction',
'ones', 'empty', 'identity', 'zeros', 'array', 'asarray',
'nonzero', 'reshape', 'arange', 'fromstring', 'ravel', 'trace',
'indices', 'where','sarray','cross_product', 'argmax', 'argmin',
'average']
def take(a, indices, axis=0):
    return np.take(a, indices, axis)
def repeat(a, repeats, axis=0):
return np.repeat(a, repeats, axis)
def sum(x, axis=0):
return np.sum(x, axis)
def product(x, axis=0):
return np.product(x, axis)
def sometrue(x, axis=0):
return np.sometrue(x, axis)
def alltrue(x, axis=0):
return np.alltrue(x, axis)
def cumsum(x, axis=0):
return np.cumsum(x, axis)
def cumproduct(x, axis=0):
return np.cumproduct(x, axis)
def argmax(x, axis=-1):
return np.argmax(x, axis)
def argmin(x, axis=-1):
return np.argmin(x, axis)
def compress(condition, m, axis=-1):
return np.compress(condition, m, axis)
def fromfunction(args, dimensions):
return np.fromfunction(args, dimensions, dtype=int)
def ones(shape, typecode='l', savespace=0, dtype=None):
"""ones(shape, dtype=int) returns an array of the given
dimensions which is initialized to all ones.
"""
dtype = convtypecode(typecode,dtype)
a = mu.empty(shape, dtype)
a.fill(1)
return a
def zeros(shape, typecode='l', savespace=0, dtype=None):
"""zeros(shape, dtype=int) returns an array of the given
dimensions which is initialized to all zeros
"""
dtype = convtypecode(typecode,dtype)
return mu.zeros(shape, dtype)
def identity(n,typecode='l', dtype=None):
"""identity(n) returns the identity 2-d array of shape n x n.
"""
dtype = convtypecode(typecode, dtype)
return nn.identity(n, dtype)
def empty(shape, typecode='l', dtype=None):
dtype = convtypecode(typecode, dtype)
return mu.empty(shape, dtype)
def array(sequence, typecode=None, copy=1, savespace=0, dtype=None):
dtype = convtypecode2(typecode, dtype)
return mu.array(sequence, dtype, copy=copy)
def sarray(a, typecode=None, copy=False, dtype=None):
dtype = convtypecode2(typecode, dtype)
return mu.array(a, dtype, copy)
def asarray(a, typecode=None, dtype=None):
dtype = convtypecode2(typecode, dtype)
return mu.array(a, dtype, copy=0)
def nonzero(a):
res = np.nonzero(a)
if len(res) == 1:
return res[0]
else:
raise ValueError, "Input argument must be 1d"
def reshape(a, shape):
return np.reshape(a, shape)
def arange(start, stop=None, step=1, typecode=None, dtype=None):
dtype = convtypecode2(typecode, dtype)
return mu.arange(start, stop, step, dtype)
def fromstring(string, typecode='l', count=-1, dtype=None):
dtype = convtypecode(typecode, dtype)
return mu.fromstring(string, dtype, count=count)
def ravel(m):
return np.ravel(m)
def trace(a, offset=0, axis1=0, axis2=1):
    return np.trace(a, offset, axis1, axis2)
def indices(dimensions, typecode=None, dtype=None):
dtype = convtypecode(typecode, dtype)
return np.indices(dimensions, dtype)
def where(condition, x, y):
return np.where(condition, x, y)
def cross_product(a, b, axis1=-1, axis2=-1):
return np.cross(a, b, axis1, axis2)
def average(a, axis=0, weights=None, returned=False):
return np.average(a, axis, weights, returned)
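# Illustrative sketch, not part of the original file: the main behavioural
# difference these wrappers preserve is Numeric's axis=0 default for
# reductions versus numpy's axis=None.  Hypothetical helper, never called by
# the module itself.
def _demo_axis_default():
    a = np.array([[1, 2], [3, 4]])
    # the module-level sum() above defaults to axis=0 -> array([4, 6]);
    # np.sum() defaults to axis=None -> 10
    return sum(a), np.sum(a)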
|
shinate/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/__init__.py
|
6014
|
# Required for Python to search this directory for module files
|
saumishr/django
|
refs/heads/master
|
tests/regressiontests/string_lookup/tests.py
|
34
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.test import TestCase
from .models import Foo, Whiz, Bar, Article, Base, Child
class StringLookupTests(TestCase):
def test_string_form_referencing(self):
"""
Regression test for #1661 and #1662
Check that string form referencing of
models works, both as pre and post reference, on all RelatedField types.
"""
f1 = Foo(name="Foo1")
f1.save()
f2 = Foo(name="Foo2")
f2.save()
w1 = Whiz(name="Whiz1")
w1.save()
b1 = Bar(name="Bar1", normal=f1, fwd=w1, back=f2)
b1.save()
self.assertEqual(b1.normal, f1)
self.assertEqual(b1.fwd, w1)
self.assertEqual(b1.back, f2)
base1 = Base(name="Base1")
base1.save()
child1 = Child(name="Child1", parent=base1)
child1.save()
self.assertEqual(child1.parent, base1)
def test_unicode_chars_in_queries(self):
"""
Regression tests for #3937
make sure we can use unicode characters in queries.
If these tests fail on MySQL, it's a problem with the test setup.
A properly configured UTF-8 database can handle this.
"""
fx = Foo(name='Bjorn', friend=u'François')
fx.save()
self.assertEqual(Foo.objects.get(friend__contains=u'\xe7'), fx)
# We can also do the above query using UTF-8 strings.
self.assertEqual(Foo.objects.get(friend__contains='\xc3\xa7'), fx)
def test_queries_on_textfields(self):
"""
Regression tests for #5087
make sure we can perform queries on TextFields.
"""
a = Article(name='Test', text='The quick brown fox jumps over the lazy dog.')
a.save()
self.assertEqual(Article.objects.get(text__exact='The quick brown fox jumps over the lazy dog.'), a)
self.assertEqual(Article.objects.get(text__contains='quick brown fox'), a)
def test_ipaddress_on_postgresql(self):
"""
Regression test for #708
"like" queries on IP address fields require casting to text (on PostgreSQL).
"""
a = Article(name='IP test', text='The body', submitted_from='192.0.2.100')
a.save()
self.assertEqual(repr(Article.objects.filter(submitted_from__contains='192.0.2')),
repr([a]))
|
yordan-desta/QgisIns
|
refs/heads/master
|
python/plugins/processing/algs/lidar/lastools/shp2las.py
|
2
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
    shp2las.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterFile
class shp2las(LAStoolsAlgorithm):
INPUT = "INPUT"
SCALE_FACTOR_XY = "SCALE_FACTOR_XY"
SCALE_FACTOR_Z = "SCALE_FACTOR_Z"
def defineCharacteristics(self):
self.name = "shp2las"
self.group = "LAStools"
self.addParametersVerboseGUI()
self.addParameter(ParameterFile(shp2las.INPUT, "Input SHP file"))
self.addParameter(ParameterNumber(shp2las.SCALE_FACTOR_XY, "resolution of x and y coordinate", False, False, 0.01))
self.addParameter(ParameterNumber(shp2las.SCALE_FACTOR_Z, "resolution of z coordinate", False, False, 0.01))
self.addParametersPointOutputGUI()
def processAlgorithm(self, progress):
commands = [os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "shp2las.exe")]
self.addParametersVerboseCommands(commands)
commands.append("-i")
commands.append(self.getParameterValue(shp2las.INPUT))
scale_factor_xy = self.getParameterValue(shp2las.SCALE_FACTOR_XY)
scale_factor_z = self.getParameterValue(shp2las.SCALE_FACTOR_Z)
if scale_factor_xy != 0.01 or scale_factor_z != 0.01:
commands.append("-set_scale_factor")
commands.append(str(scale_factor_xy) + " " + str(scale_factor_xy) + " " + str(scale_factor_z))
self.addParametersPointOutputCommands(commands)
LAStoolsUtils.runLAStools(commands, progress)
|
cloudera/hue
|
refs/heads/master
|
desktop/core/ext-py/nose-1.3.7/functional_tests/support/todo/todoplug.py
|
10
|
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
class Todo(Exception):
pass
class TodoPlugin(ErrorClassPlugin):
todo = ErrorClass(Todo, label='TODO', isfailure=True)
|
luzheqi1987/nova-annotation
|
refs/heads/master
|
nova/tests/unit/virt/xenapi/test_volumeops.py
|
65
|
# Copyright (c) 2012 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova import test
from nova.tests.unit.virt.xenapi import stubs
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
class VolumeOpsTestBase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(VolumeOpsTestBase, self).setUp()
self._setup_mock_volumeops()
def _setup_mock_volumeops(self):
self.session = stubs.FakeSessionForVolumeTests('fake_uri')
self.ops = volumeops.VolumeOps(self.session)
class VolumeDetachTestCase(VolumeOpsTestBase):
def test_detach_volume_call(self):
registered_calls = []
def regcall(label):
def side_effect(*args, **kwargs):
registered_calls.append(label)
return side_effect
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(volumeops.vm_utils, 'lookup')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_vbd_by_number')
self.mox.StubOutWithMock(volumeops.vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(volumeops.vm_utils, 'unplug_vbd')
self.mox.StubOutWithMock(volumeops.vm_utils, 'destroy_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
self.mox.StubOutWithMock(volumeops.volume_utils, 'find_sr_from_vbd')
self.mox.StubOutWithMock(volumeops.volume_utils, 'purge_sr')
volumeops.vm_utils.lookup('session', 'instance_1').AndReturn(
'vmref')
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
volumeops.volume_utils.find_vbd_by_number(
'session', 'vmref', 'devnumber').AndReturn('vbdref')
volumeops.vm_utils.is_vm_shutdown('session', 'vmref').AndReturn(
False)
volumeops.vm_utils.unplug_vbd('session', 'vbdref', 'vmref')
volumeops.vm_utils.destroy_vbd('session', 'vbdref').WithSideEffects(
regcall('destroy_vbd'))
volumeops.volume_utils.find_sr_from_vbd(
'session', 'vbdref').WithSideEffects(
regcall('find_sr_from_vbd')).AndReturn('srref')
volumeops.volume_utils.purge_sr('session', 'srref')
self.mox.ReplayAll()
ops.detach_volume(
dict(driver_volume_type='iscsi', data='conn_data'),
'instance_1', 'mountpoint')
self.assertEqual(
['find_sr_from_vbd', 'destroy_vbd'], registered_calls)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume(self, mock_vm, mock_vbd, mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.return_value = "vbd_ref"
self.ops.detach_volume({}, "name", "/dev/xvdd")
mock_vm.assert_called_once_with(self.session, "name")
mock_vbd.assert_called_once_with(self.session, "vm_ref", 3)
mock_detach.assert_called_once_with("vm_ref", ["vbd_ref"])
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume_skips_error_skip_attach(self, mock_vm, mock_vbd,
mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.return_value = None
self.ops.detach_volume({}, "name", "/dev/xvdd")
self.assertFalse(mock_detach.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volume_utils, "find_vbd_by_number")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_detach_volume_raises(self, mock_vm, mock_vbd,
mock_detach):
mock_vm.return_value = "vm_ref"
mock_vbd.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.ops.detach_volume, {}, "name", "/dev/xvdd")
self.assertFalse(mock_detach.called)
@mock.patch.object(volume_utils, "purge_sr")
@mock.patch.object(vm_utils, "destroy_vbd")
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(vm_utils, "unplug_vbd")
@mock.patch.object(vm_utils, "is_vm_shutdown")
def test_detach_vbds_and_srs_not_shutdown(self, mock_shutdown, mock_unplug,
mock_find_sr, mock_destroy, mock_purge):
mock_shutdown.return_value = False
mock_find_sr.return_value = "sr_ref"
self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref"])
mock_shutdown.assert_called_once_with(self.session, "vm_ref")
mock_find_sr.assert_called_once_with(self.session, "vbd_ref")
mock_unplug.assert_called_once_with(self.session, "vbd_ref", "vm_ref")
mock_destroy.assert_called_once_with(self.session, "vbd_ref")
mock_purge.assert_called_once_with(self.session, "sr_ref")
@mock.patch.object(volume_utils, "purge_sr")
@mock.patch.object(vm_utils, "destroy_vbd")
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(vm_utils, "unplug_vbd")
@mock.patch.object(vm_utils, "is_vm_shutdown")
def test_detach_vbds_and_srs_is_shutdown(self, mock_shutdown, mock_unplug,
mock_find_sr, mock_destroy, mock_purge):
mock_shutdown.return_value = True
mock_find_sr.return_value = "sr_ref"
self.ops._detach_vbds_and_srs("vm_ref", ["vbd_ref_1", "vbd_ref_2"])
expected = [mock.call(self.session, "vbd_ref_1"),
mock.call(self.session, "vbd_ref_2")]
self.assertEqual(expected, mock_destroy.call_args_list)
mock_purge.assert_called_with(self.session, "sr_ref")
self.assertFalse(mock_unplug.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_detach_all_no_volumes(self, mock_get_all, mock_detach):
mock_get_all.return_value = []
self.ops.detach_all("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
self.assertFalse(mock_detach.called)
@mock.patch.object(volumeops.VolumeOps, "_detach_vbds_and_srs")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_detach_all_volumes(self, mock_get_all, mock_detach):
mock_get_all.return_value = ["1"]
self.ops.detach_all("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
mock_detach.assert_called_once_with("vm_ref", ["1"])
def test_get_all_volume_vbd_refs_no_vbds(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = []
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual([], list(result))
mock_get.assert_called_once_with("vm_ref")
self.assertFalse(mock_conf.called)
def test_get_all_volume_vbd_refs_no_volumes(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = ["1"]
mock_conf.return_value = {}
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual([], list(result))
mock_get.assert_called_once_with("vm_ref")
mock_conf.assert_called_once_with("1")
def test_get_all_volume_vbd_refs_with_volumes(self):
with mock.patch.object(self.session.VM, "get_VBDs") as mock_get:
with mock.patch.object(self.session.VBD,
"get_other_config") as mock_conf:
mock_get.return_value = ["1", "2"]
mock_conf.return_value = {"osvol": True}
result = self.ops._get_all_volume_vbd_refs("vm_ref")
self.assertEqual(["1", "2"], list(result))
mock_get.assert_called_once_with("vm_ref")
class AttachVolumeTestCase(VolumeOpsTestBase):
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_attach_volume_default_hotplug(self, mock_get_vm, mock_attach):
mock_get_vm.return_value = "vm_ref"
self.ops.attach_volume({}, "instance_name", "/dev/xvda")
mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
True)
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
@mock.patch.object(vm_utils, "vm_ref_or_raise")
def test_attach_volume_hotplug(self, mock_get_vm, mock_attach):
mock_get_vm.return_value = "vm_ref"
self.ops.attach_volume({}, "instance_name", "/dev/xvda", False)
mock_attach.assert_called_once_with({}, "vm_ref", "instance_name", 0,
False)
@mock.patch.object(volumeops.VolumeOps, "_attach_volume")
def test_attach_volume_default_hotplug_connect_volume(self, mock_attach):
self.ops.connect_volume({})
mock_attach.assert_called_once_with({})
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_with_defaults(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver):
connection_info = {"data": {}}
with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_vdi.return_value = "vdi_uuid"
result = self.ops._attach_volume(connection_info)
self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, None)
mock_hypervisor.assert_called_once_with("sr_ref", {})
self.assertFalse(mock_attach.called)
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_with_hot_attach(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver):
connection_info = {"data": {}}
with mock.patch.object(self.session.VDI, "get_uuid") as mock_vdi:
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_hypervisor.return_value = "vdi_ref"
mock_vdi.return_value = "vdi_uuid"
result = self.ops._attach_volume(connection_info, "vm_ref",
"name", 2, True)
self.assertEqual(result, ("sr_uuid", "vdi_uuid"))
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, "name")
mock_hypervisor.assert_called_once_with("sr_ref", {})
mock_attach.assert_called_once_with("vdi_ref", "vm_ref", "name", 2,
True)
@mock.patch.object(volume_utils, "forget_sr")
@mock.patch.object(volumeops.VolumeOps, "_check_is_supported_driver_type")
@mock.patch.object(volumeops.VolumeOps, "_connect_to_volume_provider")
@mock.patch.object(volumeops.VolumeOps, "_connect_hypervisor_to_volume")
@mock.patch.object(volumeops.VolumeOps, "_attach_volume_to_vm")
def test_attach_volume_cleanup(self, mock_attach, mock_hypervisor,
mock_provider, mock_driver, mock_forget):
connection_info = {"data": {}}
mock_provider.return_value = ("sr_ref", "sr_uuid")
mock_hypervisor.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.ops._attach_volume, connection_info)
mock_driver.assert_called_once_with(connection_info)
mock_provider.assert_called_once_with({}, None)
mock_hypervisor.assert_called_once_with("sr_ref", {})
mock_forget.assert_called_once_with(self.session, "sr_ref")
self.assertFalse(mock_attach.called)
def test_check_is_supported_driver_type_pass_iscsi(self):
conn_info = {"driver_volume_type": "iscsi"}
self.ops._check_is_supported_driver_type(conn_info)
def test_check_is_supported_driver_type_pass_xensm(self):
conn_info = {"driver_volume_type": "xensm"}
self.ops._check_is_supported_driver_type(conn_info)
def test_check_is_supported_driver_type_pass_bad(self):
conn_info = {"driver_volume_type": "bad"}
self.assertRaises(exception.VolumeDriverNotFound,
self.ops._check_is_supported_driver_type, conn_info)
@mock.patch.object(volume_utils, "introduce_sr")
@mock.patch.object(volume_utils, "find_sr_by_uuid")
@mock.patch.object(volume_utils, "parse_sr_info")
def test_connect_to_volume_provider_new_sr(self, mock_parse, mock_find_sr,
mock_introduce_sr):
mock_parse.return_value = ("uuid", "label", "params")
mock_find_sr.return_value = None
mock_introduce_sr.return_value = "sr_ref"
ref, uuid = self.ops._connect_to_volume_provider({}, "name")
self.assertEqual("sr_ref", ref)
self.assertEqual("uuid", uuid)
mock_parse.assert_called_once_with({}, "Disk-for:name")
mock_find_sr.assert_called_once_with(self.session, "uuid")
mock_introduce_sr.assert_called_once_with(self.session, "uuid",
"label", "params")
@mock.patch.object(volume_utils, "introduce_sr")
@mock.patch.object(volume_utils, "find_sr_by_uuid")
@mock.patch.object(volume_utils, "parse_sr_info")
def test_connect_to_volume_provider_old_sr(self, mock_parse, mock_find_sr,
mock_introduce_sr):
mock_parse.return_value = ("uuid", "label", "params")
mock_find_sr.return_value = "sr_ref"
ref, uuid = self.ops._connect_to_volume_provider({}, "name")
self.assertEqual("sr_ref", ref)
self.assertEqual("uuid", uuid)
mock_parse.assert_called_once_with({}, "Disk-for:name")
mock_find_sr.assert_called_once_with(self.session, "uuid")
self.assertFalse(mock_introduce_sr.called)
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_regular(self, mock_intro):
mock_intro.return_value = "vdi"
result = self.ops._connect_hypervisor_to_volume("sr", {})
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr")
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_vdi(self, mock_intro):
mock_intro.return_value = "vdi"
conn = {"vdi_uuid": "id"}
result = self.ops._connect_hypervisor_to_volume("sr", conn)
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr",
vdi_uuid="id")
@mock.patch.object(volume_utils, "introduce_vdi")
def test_connect_hypervisor_to_volume_lun(self, mock_intro):
mock_intro.return_value = "vdi"
conn = {"target_lun": "lun"}
result = self.ops._connect_hypervisor_to_volume("sr", conn)
self.assertEqual("vdi", result)
mock_intro.assert_called_once_with(self.session, "sr",
target_lun="lun")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_plug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
mock_shutdown.return_value = False
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
mock_plug.assert_called_once_with("vbd", "vm")
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
mock_shutdown.assert_called_once_with(self.session, "vm")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_no_plug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
mock_shutdown.return_value = True
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, True)
self.assertFalse(mock_plug.called)
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
mock_shutdown.assert_called_once_with(self.session, "vm")
@mock.patch.object(vm_utils, "is_vm_shutdown")
@mock.patch.object(vm_utils, "create_vbd")
def test_attach_volume_to_vm_no_hotplug(self, mock_vbd, mock_shutdown):
mock_vbd.return_value = "vbd"
with mock.patch.object(self.session.VBD, "plug") as mock_plug:
self.ops._attach_volume_to_vm("vdi", "vm", "name", 2, False)
self.assertFalse(mock_plug.called)
mock_vbd.assert_called_once_with(self.session, "vm", "vdi", 2,
bootable=False, osvol=True)
self.assertFalse(mock_shutdown.called)
class FindBadVolumeTestCase(VolumeOpsTestBase):
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_no_vbds(self, mock_get_all):
mock_get_all.return_value = []
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
self.assertEqual([], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_no_bad_vbds(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["1", "2"]
mock_find_sr.return_value = "sr_ref"
with mock.patch.object(self.session.SR, "scan") as mock_scan:
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
expected_find = [mock.call(self.session, "1"),
mock.call(self.session, "2")]
self.assertEqual(expected_find, mock_find_sr.call_args_list)
expected_scan = [mock.call("sr_ref"), mock.call("sr_ref")]
self.assertEqual(expected_scan, mock_scan.call_args_list)
self.assertEqual([], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_bad_vbds(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["vbd_ref"]
mock_find_sr.return_value = "sr_ref"
class FakeException(Exception):
details = ['SR_BACKEND_FAILURE_40', "", "", ""]
session = mock.Mock()
session.XenAPI.Failure = FakeException
self.ops._session = session
with mock.patch.object(session.SR, "scan") as mock_scan:
with mock.patch.object(session.VBD,
"get_device") as mock_get:
mock_scan.side_effect = FakeException
mock_get.return_value = "xvdb"
result = self.ops.find_bad_volumes("vm_ref")
mock_get_all.assert_called_once_with("vm_ref")
mock_scan.assert_called_once_with("sr_ref")
mock_get.assert_called_once_with("vbd_ref")
self.assertEqual(["/dev/xvdb"], result)
@mock.patch.object(volume_utils, "find_sr_from_vbd")
@mock.patch.object(volumeops.VolumeOps, "_get_all_volume_vbd_refs")
def test_find_bad_volumes_raises(self, mock_get_all, mock_find_sr):
mock_get_all.return_value = ["vbd_ref"]
mock_find_sr.return_value = "sr_ref"
class FakeException(Exception):
details = ['foo', "", "", ""]
session = mock.Mock()
session.XenAPI.Failure = FakeException
self.ops._session = session
with mock.patch.object(session.SR, "scan") as mock_scan:
with mock.patch.object(session.VBD,
"get_device") as mock_get:
mock_scan.side_effect = FakeException
mock_get.return_value = "xvdb"
self.assertRaises(FakeException,
self.ops.find_bad_volumes, "vm_ref")
mock_scan.assert_called_once_with("sr_ref")
class CleanupFromVDIsTestCase(VolumeOpsTestBase):
def _check_find_purge_calls(self, find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs):
find_sr_calls = [mock.call(self.ops._session, vdi_ref) for vdi_ref
in vdi_refs]
find_sr_from_vdi.assert_has_calls(find_sr_calls)
purge_sr_calls = [mock.call(self.ops._session, sr_ref) for sr_ref
in sr_refs]
purge_sr.assert_has_calls(purge_sr_calls)
@mock.patch.object(volume_utils, 'find_sr_from_vdi')
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis(self, purge_sr, find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref1', 'sr_ref2']
find_sr_from_vdi.side_effect = sr_refs
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
@mock.patch.object(volume_utils, 'find_sr_from_vdi',
side_effect=[exception.StorageError(reason=''), 'sr_ref2'])
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis_handles_find_sr_exception(self, purge_sr,
find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref2']
find_sr_from_vdi.side_effect = [exception.StorageError(reason=''),
sr_refs[0]]
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
@mock.patch.object(volume_utils, 'find_sr_from_vdi')
@mock.patch.object(volume_utils, 'purge_sr')
def test_safe_cleanup_from_vdis_handles_purge_sr_exception(self, purge_sr,
find_sr_from_vdi):
vdi_refs = ['vdi_ref1', 'vdi_ref2']
sr_refs = ['sr_ref1', 'sr_ref2']
find_sr_from_vdi.side_effect = sr_refs
        purge_sr.side_effect = [test.TestingException, None]
self.ops.safe_cleanup_from_vdis(vdi_refs)
self._check_find_purge_calls(find_sr_from_vdi, purge_sr, vdi_refs,
sr_refs)
|
ghber/My-Django-Nonrel
|
refs/heads/master
|
django/test/utils.py
|
185
|
import sys
import time
import os
import warnings
from django.conf import settings
from django.core import mail
from django.core.mail.backends import locmem
from django.test import signals
from django.template import Template
from django.utils.translation import deactivate
__all__ = ('Approximate', 'ContextList', 'setup_test_environment',
'teardown_test_environment', 'get_runner')
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val-other), self.places) == 0
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, basestring):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
value = self[key]
except KeyError:
return False
return True
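# Illustrative sketch, not part of the original module: ContextList lets a
# test reach into whichever subcontext holds a key when a response rendered
# several templates.  The dictionaries below are assumptions for illustration.
def _demo_context_list_lookup():
    contexts = ContextList([{'user': 'alice'}, {'form': None}])
    return contexts['form']  # searches every subcontext; KeyError if absent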
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
signals.template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
        - Setting the email backend to the locmem email backend.
- Setting the active locale to match the LANGUAGE_CODE setting.
"""
Template.original_render = Template._render
Template._render = instrumented_test_render
mail.original_SMTPConnection = mail.SMTPConnection
mail.SMTPConnection = locmem.EmailBackend
mail.original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template.original_render
del Template.original_render
mail.SMTPConnection = mail.original_SMTPConnection
del mail.original_SMTPConnection
settings.EMAIL_BACKEND = mail.original_email_backend
del mail.original_email_backend
del mail.outbox
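# Usage sketch: an illustrative flow for a hand-rolled harness; the bundled
# Django test runners normally call these hooks for you.
#
#     setup_test_environment()
#     try:
#         ...  # run tests: rendered templates emit template_rendered and
#              # outgoing messages are collected in mail.outbox
#     finally:
#         teardown_test_environment()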
def get_warnings_state():
"""
Returns an object containing the state of the warnings module
"""
# There is no public interface for doing this, but this implementation of
# get_warnings_state and restore_warnings_state appears to work on Python
# 2.4 to 2.7.
return warnings.filters[:]
def restore_warnings_state(state):
"""
Restores the state of the warnings module when passed an object that was
returned by get_warnings_state()
"""
warnings.filters = state[:]
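# Usage sketch: saving and restoring the warnings filters around code that
# installs its own filters (the filter below is illustrative only).
#
#     state = get_warnings_state()
#     try:
#         warnings.simplefilter('ignore', DeprecationWarning)
#         ...  # code under test
#     finally:
#         restore_warnings_state(state)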
def get_runner(settings):
test_path = settings.TEST_RUNNER.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, test_path[-1])
test_runner = getattr(test_module, test_path[-1])
return test_runner
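# Usage sketch: resolving the configured runner, assuming TEST_RUNNER names a
# class-based runner such as 'django.test.simple.DjangoTestSuiteRunner'
# (illustrative value; a function-based runner would be called directly).
#
#     TestRunner = get_runner(settings)
#     failures = TestRunner(verbosity=1, interactive=False).run_tests(['myapp'])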
|
iwitaly/psycopg2-for-aws-lambda
|
refs/heads/master
|
psycopg2/_json.py
|
15
|
"""Implementation of the JSON adaptation objects
This module exists to avoid a circular import problem: psycopg2.extras depends
on psycopg2.extensions, so I can't create the default JSON typecasters in
extensions importing register_json from extras.
"""
# psycopg/_json.py - Implementation of the JSON adaptation objects
#
# Copyright (C) 2012 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import sys
from psycopg2._psycopg import ISQLQuote, QuotedString
from psycopg2._psycopg import new_type, new_array_type, register_type
# import the best json implementation available
if sys.version_info[:2] >= (2, 6):
import json
else:
try:
import simplejson as json
except ImportError:
json = None
# oids from PostgreSQL 9.2
JSON_OID = 114
JSONARRAY_OID = 199
# oids from PostgreSQL 9.4
JSONB_OID = 3802
JSONBARRAY_OID = 3807
class Json(object):
"""
An `~psycopg2.extensions.ISQLQuote` wrapper to adapt a Python object to
:sql:`json` data type.
`!Json` can be used to wrap any object supported by the provided *dumps*
function. If none is provided, the standard :py:func:`json.dumps()` is
used (`!simplejson` for Python < 2.6;
`~psycopg2.extensions.ISQLQuote.getquoted()` will raise `!ImportError` if
the module is not available).
"""
def __init__(self, adapted, dumps=None):
self.adapted = adapted
if dumps is not None:
self._dumps = dumps
elif json is not None:
self._dumps = json.dumps
else:
self._dumps = None
def __conform__(self, proto):
if proto is ISQLQuote:
return self
def dumps(self, obj):
"""Serialize *obj* in JSON format.
The default is to call `!json.dumps()` or the *dumps* function
provided in the constructor. You can override this method to create a
customized JSON wrapper.
"""
dumps = self._dumps
if dumps is not None:
return dumps(obj)
else:
raise ImportError(
"json module not available: "
"you should provide a dumps function")
def getquoted(self):
s = self.dumps(self.adapted)
return QuotedString(s).getquoted()
if sys.version_info < (3,):
def __str__(self):
return self.getquoted()
else:
def __str__(self):
# getquoted is binary in Py3
return self.getquoted().decode('ascii', 'replace')
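# Usage sketch: adapting a Python dict for an INSERT, assuming ``curs`` is an
# open cursor on a table with a json column (table/column names are
# illustrative).
#
#     curs.execute("insert into mytable (jsondata) values (%s)",
#                  [Json({'a': 100})])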
def register_json(conn_or_curs=None, globally=False, loads=None,
oid=None, array_oid=None, name='json'):
"""Create and register typecasters converting :sql:`json` type to Python objects.
:param conn_or_curs: a connection or cursor used to find the :sql:`json`
and :sql:`json[]` oids; the typecasters are registered in a scope
limited to this object, unless *globally* is set to `!True`. It can be
`!None` if the oids are provided
:param globally: if `!False` register the typecasters only on
*conn_or_curs*, otherwise register them globally
:param loads: the function used to parse the data into a Python object. If
`!None` use `!json.loads()`, where `!json` is the module chosen
according to the Python version (see above)
:param oid: the OID of the :sql:`json` type if known; if not, it will be
queried on *conn_or_curs*
:param array_oid: the OID of the :sql:`json[]` array type if known;
if not, it will be queried on *conn_or_curs*
:param name: the name of the data type to look for in *conn_or_curs*
The connection or cursor passed to the function will be used to query the
database and look for the OID of the :sql:`json` type (or an alternative
type if *name* is provided). No query is performed if *oid* and *array_oid*
are provided. Raise `~psycopg2.ProgrammingError` if the type is not found.
"""
if oid is None:
oid, array_oid = _get_json_oids(conn_or_curs, name)
JSON, JSONARRAY = _create_json_typecasters(
oid, array_oid, loads=loads, name=name.upper())
register_type(JSON, not globally and conn_or_curs or None)
if JSONARRAY is not None:
register_type(JSONARRAY, not globally and conn_or_curs or None)
return JSON, JSONARRAY
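# Usage sketch: enabling json parsing on a single connection, assuming
# ``conn`` is an open psycopg2 connection (illustrative only).
#
#     register_json(conn)                      # look up the oids on the server
#     register_json(conn, loads=lambda x: x)   # or keep the raw string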
def register_default_json(conn_or_curs=None, globally=False, loads=None):
"""
Create and register :sql:`json` typecasters for PostgreSQL 9.2 and following.
Since PostgreSQL 9.2, :sql:`json` is a builtin type, so its oid is known
and fixed. This function allows specifying a customized *loads* function
for the default :sql:`json` type without querying the database.
All the parameters have the same meaning as in `register_json()`.
"""
return register_json(conn_or_curs=conn_or_curs, globally=globally,
loads=loads, oid=JSON_OID, array_oid=JSONARRAY_OID)
def register_default_jsonb(conn_or_curs=None, globally=False, loads=None):
"""
Create and register :sql:`jsonb` typecasters for PostgreSQL 9.4 and following.
As in `register_default_json()`, the function allows registering a
customized *loads* function for the :sql:`jsonb` type at its known oid for
PostgreSQL 9.4 and following versions. All the parameters have the same
meaning as in `register_json()`.
"""
return register_json(conn_or_curs=conn_or_curs, globally=globally,
loads=loads, oid=JSONB_OID, array_oid=JSONBARRAY_OID, name='jsonb')
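# Usage sketch: the fixed-oid variants above skip the catalog query, so they
# can be called before any connection exists (illustrative calls; ``my_loads``
# is a hypothetical callable).
#
#     register_default_json(globally=True)
#     register_default_jsonb(conn, loads=my_loads)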
def _create_json_typecasters(oid, array_oid, loads=None, name='JSON'):
"""Create typecasters for json data type."""
if loads is None:
if json is None:
raise ImportError("no json module available")
else:
loads = json.loads
def typecast_json(s, cur):
if s is None:
return None
return loads(s)
JSON = new_type((oid, ), name, typecast_json)
if array_oid is not None:
JSONARRAY = new_array_type((array_oid, ), "%sARRAY" % name, JSON)
else:
JSONARRAY = None
return JSON, JSONARRAY
def _get_json_oids(conn_or_curs, name='json'):
# lazy imports
from psycopg2.extensions import STATUS_IN_TRANSACTION
from psycopg2.extras import _solve_conn_curs
conn, curs = _solve_conn_curs(conn_or_curs)
# Store the transaction status of the connection to revert it after use
conn_status = conn.status
# column typarray not available before PG 8.3
typarray = conn.server_version >= 80300 and "typarray" or "NULL"
# get the oid (and array oid) for the requested json type
curs.execute(
"SELECT t.oid, %s FROM pg_type t WHERE t.typname = %%s;"
% typarray, (name,))
r = curs.fetchone()
# revert the status of the connection as before the command
if (conn_status != STATUS_IN_TRANSACTION and not conn.autocommit):
conn.rollback()
if not r:
raise conn.ProgrammingError("%s data type not found" % name)
return r
|
RandallDW/Aruba_plugin
|
refs/heads/Aruba_plugin
|
plugins/org.python.pydev.jython/Lib/distutils/command/install_egg_info.py
|
438
|
"""distutils.command.install_egg_info
Implements the Distutils 'install_egg_info' command, for installing
a package's PKG-INFO metadata."""
from distutils.cmd import Command
from distutils import log, dir_util
import os, sys, re
class install_egg_info(Command):
"""Install an .egg-info file for the package"""
description = "Install package's PKG-INFO metadata as an .egg-info file"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',('install_dir','install_dir'))
basename = "%s-%s-py%s.egg-info" % (
to_filename(safe_name(self.distribution.get_name())),
to_filename(safe_version(self.distribution.get_version())),
sys.version[:3]
)
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
target = self.target
if os.path.isdir(target) and not os.path.islink(target):
dir_util.remove_tree(target, dry_run=self.dry_run)
elif os.path.exists(target):
self.execute(os.unlink,(self.target,),"Removing "+target)
elif not os.path.isdir(self.install_dir):
self.execute(os.makedirs, (self.install_dir,),
"Creating "+self.install_dir)
log.info("Writing %s", target)
if not self.dry_run:
f = open(target, 'w')
self.distribution.metadata.write_pkg_file(f)
f.close()
def get_outputs(self):
return self.outputs
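# Usage sketch: the command is normally driven through setup.py rather than
# instantiated directly (illustrative invocation from a shell):
#
#     python setup.py install_egg_info --install-dir=/tmp/site-packages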
# The following routines are taken from setuptools' pkg_resources module and
# can be replaced by importing them from pkg_resources once it is included
# in the stdlib.
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
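# Usage sketch: how the helpers above shape the .egg-info basename
# (illustrative inputs).
#
#     safe_name('my.pkg name')       -> 'my.pkg-name'
#     safe_version('1.0 beta')       -> '1.0.beta'
#     to_filename(safe_name('a-b'))  -> 'a_b'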
|
dtaht/ns-3-codel
|
refs/heads/master
|
src/tools/bindings/modulegen__gcc_ILP32.py
|
32
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.tools', cpp_namespace='::ns3')
return root_module
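# Usage sketch: how this generated module is typically driven; the ns-3
# build system calls the registration functions in order (the output file
# name below is illustrative).
#
#     root_module = module_init()
#     register_types(root_module)
#     register_methods(root_module)
#     root_module.generate(FileCodeSink(open('ns3module.cc', 'w')))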
def register_types(module):
root_module = module.get_root()
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## delay-jitter-estimation.h (module 'tools'): ns3::DelayJitterEstimation [class]
module.add_class('DelayJitterEstimation')
## event-garbage-collector.h (module 'tools'): ns3::EventGarbageCollector [class]
module.add_class('EventGarbageCollector')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## gnuplot.h (module 'tools'): ns3::Gnuplot [class]
module.add_class('Gnuplot')
## gnuplot.h (module 'tools'): ns3::GnuplotCollection [class]
module.add_class('GnuplotCollection')
## gnuplot.h (module 'tools'): ns3::GnuplotDataset [class]
module.add_class('GnuplotDataset')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset [class]
module.add_class('Gnuplot2dDataset', parent=root_module['ns3::GnuplotDataset'])
## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::Style [enumeration]
module.add_enum('Style', ['LINES', 'POINTS', 'LINES_POINTS', 'DOTS', 'IMPULSES', 'STEPS', 'FSTEPS', 'HISTEPS'], outer_class=root_module['ns3::Gnuplot2dDataset'])
## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::ErrorBars [enumeration]
module.add_enum('ErrorBars', ['NONE', 'X', 'Y', 'XY'], outer_class=root_module['ns3::Gnuplot2dDataset'])
## gnuplot.h (module 'tools'): ns3::Gnuplot2dFunction [class]
module.add_class('Gnuplot2dFunction', parent=root_module['ns3::GnuplotDataset'])
## gnuplot.h (module 'tools'): ns3::Gnuplot3dDataset [class]
module.add_class('Gnuplot3dDataset', parent=root_module['ns3::GnuplotDataset'])
## gnuplot.h (module 'tools'): ns3::Gnuplot3dFunction [class]
module.add_class('Gnuplot3dFunction', parent=root_module['ns3::GnuplotDataset'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## nstime.h (module 'core'): ns3::TimeChecker [class]
module.add_class('TimeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DelayJitterEstimation_methods(root_module, root_module['ns3::DelayJitterEstimation'])
register_Ns3EventGarbageCollector_methods(root_module, root_module['ns3::EventGarbageCollector'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Gnuplot_methods(root_module, root_module['ns3::Gnuplot'])
register_Ns3GnuplotCollection_methods(root_module, root_module['ns3::GnuplotCollection'])
register_Ns3GnuplotDataset_methods(root_module, root_module['ns3::GnuplotDataset'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Gnuplot2dDataset_methods(root_module, root_module['ns3::Gnuplot2dDataset'])
register_Ns3Gnuplot2dFunction_methods(root_module, root_module['ns3::Gnuplot2dFunction'])
register_Ns3Gnuplot3dDataset_methods(root_module, root_module['ns3::Gnuplot3dDataset'])
register_Ns3Gnuplot3dFunction_methods(root_module, root_module['ns3::Gnuplot3dFunction'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3TimeChecker_methods(root_module, root_module['ns3::TimeChecker'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'bool',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): bool ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'bool',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFullCopy() const [member function]
cls.add_method('CreateFullCopy',
'ns3::Buffer',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentEndOffset() const [member function]
cls.add_method('GetCurrentEndOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): int32_t ns3::Buffer::GetCurrentStartOffset() const [member function]
cls.add_method('GetCurrentStartOffset',
'int32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t adjustment, int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t adjustment, int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'adjustment'), param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3DelayJitterEstimation_methods(root_module, cls):
## delay-jitter-estimation.h (module 'tools'): ns3::DelayJitterEstimation::DelayJitterEstimation(ns3::DelayJitterEstimation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DelayJitterEstimation const &', 'arg0')])
## delay-jitter-estimation.h (module 'tools'): ns3::DelayJitterEstimation::DelayJitterEstimation() [constructor]
cls.add_constructor([])
## delay-jitter-estimation.h (module 'tools'): ns3::Time ns3::DelayJitterEstimation::GetLastDelay() const [member function]
cls.add_method('GetLastDelay',
'ns3::Time',
[],
is_const=True)
## delay-jitter-estimation.h (module 'tools'): uint64_t ns3::DelayJitterEstimation::GetLastJitter() const [member function]
cls.add_method('GetLastJitter',
'uint64_t',
[],
is_const=True)
## delay-jitter-estimation.h (module 'tools'): static void ns3::DelayJitterEstimation::PrepareTx(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('PrepareTx',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')],
is_static=True)
## delay-jitter-estimation.h (module 'tools'): void ns3::DelayJitterEstimation::RecordRx(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('RecordRx',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
return
def register_Ns3EventGarbageCollector_methods(root_module, cls):
## event-garbage-collector.h (module 'tools'): ns3::EventGarbageCollector::EventGarbageCollector(ns3::EventGarbageCollector const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')])
## event-garbage-collector.h (module 'tools'): ns3::EventGarbageCollector::EventGarbageCollector() [constructor]
cls.add_constructor([])
## event-garbage-collector.h (module 'tools'): void ns3::EventGarbageCollector::Track(ns3::EventId event) [member function]
cls.add_method('Track',
'void',
[param('ns3::EventId', 'event')])
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
def register_Ns3Gnuplot_methods(root_module, cls):
## gnuplot.h (module 'tools'): ns3::Gnuplot::Gnuplot(ns3::Gnuplot const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Gnuplot const &', 'arg0')])
## gnuplot.h (module 'tools'): ns3::Gnuplot::Gnuplot(std::string const & outputFilename="", std::string const & title="") [constructor]
cls.add_constructor([param('std::string const &', 'outputFilename', default_value='""'), param('std::string const &', 'title', default_value='""')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot::AddDataset(ns3::GnuplotDataset const & dataset) [member function]
cls.add_method('AddDataset',
'void',
[param('ns3::GnuplotDataset const &', 'dataset')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot::AppendExtra(std::string const & extra) [member function]
cls.add_method('AppendExtra',
'void',
[param('std::string const &', 'extra')])
## gnuplot.h (module 'tools'): static std::string ns3::Gnuplot::DetectTerminal(std::string const & filename) [member function]
cls.add_method('DetectTerminal',
'std::string',
[param('std::string const &', 'filename')],
is_static=True)
## gnuplot.h (module 'tools'): void ns3::Gnuplot::GenerateOutput(std::ostream & os) const [member function]
cls.add_method('GenerateOutput',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## gnuplot.h (module 'tools'): void ns3::Gnuplot::SetExtra(std::string const & extra) [member function]
cls.add_method('SetExtra',
'void',
[param('std::string const &', 'extra')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot::SetLegend(std::string const & xLegend, std::string const & yLegend) [member function]
cls.add_method('SetLegend',
'void',
[param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot::SetTerminal(std::string const & terminal) [member function]
cls.add_method('SetTerminal',
'void',
[param('std::string const &', 'terminal')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot::SetTitle(std::string const & title) [member function]
cls.add_method('SetTitle',
'void',
[param('std::string const &', 'title')])
return
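# Gnuplot is a plot-description builder; GenerateOutput() writes a gnuplot script
# to a C++ std::ostream, which is easier to drive from C++ or via helpers outside
# this module, so only the builder calls are sketched below (hedged example):
#
#   plot = ns3.Gnuplot("throughput.png", "Throughput over time")
#   plot.SetTerminal(ns3.Gnuplot.DetectTerminal("throughput.png"))
#   plot.SetLegend("Time (s)", "Throughput (Mbps)")
#   plot.AddDataset(dataset)   # e.g. an ns3.Gnuplot2dDataset built elsewhere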
def register_Ns3GnuplotCollection_methods(root_module, cls):
## gnuplot.h (module 'tools'): ns3::GnuplotCollection::GnuplotCollection(ns3::GnuplotCollection const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GnuplotCollection const &', 'arg0')])
## gnuplot.h (module 'tools'): ns3::GnuplotCollection::GnuplotCollection(std::string const & outputFilename) [constructor]
cls.add_constructor([param('std::string const &', 'outputFilename')])
## gnuplot.h (module 'tools'): void ns3::GnuplotCollection::AddPlot(ns3::Gnuplot const & plot) [member function]
cls.add_method('AddPlot',
'void',
[param('ns3::Gnuplot const &', 'plot')])
## gnuplot.h (module 'tools'): void ns3::GnuplotCollection::GenerateOutput(std::ostream & os) const [member function]
cls.add_method('GenerateOutput',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## gnuplot.h (module 'tools'): ns3::Gnuplot & ns3::GnuplotCollection::GetPlot(unsigned int id) [member function]
cls.add_method('GetPlot',
'ns3::Gnuplot &',
[param('unsigned int', 'id')])
## gnuplot.h (module 'tools'): void ns3::GnuplotCollection::SetTerminal(std::string const & terminal) [member function]
cls.add_method('SetTerminal',
'void',
[param('std::string const &', 'terminal')])
return
def register_Ns3GnuplotDataset_methods(root_module, cls):
## gnuplot.h (module 'tools'): ns3::GnuplotDataset::GnuplotDataset(ns3::GnuplotDataset const & original) [copy constructor]
cls.add_constructor([param('ns3::GnuplotDataset const &', 'original')])
## gnuplot.h (module 'tools'): static void ns3::GnuplotDataset::SetDefaultExtra(std::string const & extra) [member function]
cls.add_method('SetDefaultExtra',
'void',
[param('std::string const &', 'extra')],
is_static=True)
## gnuplot.h (module 'tools'): void ns3::GnuplotDataset::SetExtra(std::string const & extra) [member function]
cls.add_method('SetExtra',
'void',
[param('std::string const &', 'extra')])
## gnuplot.h (module 'tools'): void ns3::GnuplotDataset::SetTitle(std::string const & title) [member function]
cls.add_method('SetTitle',
'void',
[param('std::string const &', 'title')])
## gnuplot.h (module 'tools'): ns3::GnuplotDataset::GnuplotDataset(ns3::GnuplotDataset::Data * data) [constructor]
cls.add_constructor([param('ns3::GnuplotDataset::Data *', 'data')],
visibility='protected')
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
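# ObjectBase carries the attribute and trace-connection plumbing inherited by all
# ns-3 objects. A hedged sketch of attribute access through these wrappers; the
# attribute name is illustrative and ns3.StringValue is assumed from the core module:
#
#   obj.SetAttribute("SomeAttribute", ns3.StringValue("value"))
#   out = ns3.StringValue()
#   obj.GetAttribute("SomeAttribute", out)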
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
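# ObjectFactory lets a script pick a TypeId, override attributes, and instantiate
# objects. Minimal sketch; the TypeId name, attribute name and value type are
# illustrative assumptions, not guaranteed by this module:
#
#   factory = ns3.ObjectFactory()
#   factory.SetTypeId("ns3::DropTailQueue")           # assumed TypeId name
#   factory.Set("MaxPackets", ns3.UintegerValue(50))  # assumed attribute
#   obj = factory.Create()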
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
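# PacketMetadata recording is disabled by default for speed; the static switches
# registered above are what scripts call before creating packets if per-packet
# header/trailer records are wanted. Sketch:
#
#   ns3.PacketMetadata.Enable()          # record metadata for new packets
#   ns3.PacketMetadata.EnableChecking()  # additionally verify metadata consistency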
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 20 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Next() [member function]
cls.add_method('Next',
'ns3::Time',
[],
is_static=True, deprecated=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::RunOneEvent() [member function]
cls.add_method('RunOneEvent',
'void',
[],
is_static=True, deprecated=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & time) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'time')],
is_static=True)
return
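# Simulator exposes the static scheduling/control API. Run() and Schedule() are
# wrapped by hand elsewhere, so this sketch mixes methods registered above with
# those assumed wrappers:
#
#   ns3.Simulator.Stop(ns3.Seconds(10.0))
#   ns3.Simulator.Run()                  # assumed custom wrapper
#   print(ns3.Simulator.Now())
#   ns3.Simulator.Destroy()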
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
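# TypeId is ns-3's runtime type/metadata handle; the methods registered above
# support introspection from Python. Sketch:
#
#   tid = ns3.TypeId.LookupByName("ns3::Object")
#   print(tid.GetName(), tid.GetAttributeN())
#   for i in range(tid.GetAttributeN()):
#       print(" ", tid.GetAttributeFullName(i))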
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short unsigned int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('unsigned char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('long int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('short int const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('signed char const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('double const', 'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
return
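# int64x64_t is the fixed-point type behind ns3::Time; the operator registrations
# above make it combinable with Python numbers. Sketch:
#
#   a = ns3.int64x64_t(1.5)
#   b = a * 2 + ns3.int64x64_t(0.25)
#   print(b.GetDouble())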
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Gnuplot2dDataset_methods(root_module, cls):
## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::Gnuplot2dDataset(ns3::Gnuplot2dDataset const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Gnuplot2dDataset const &', 'arg0')])
## gnuplot.h (module 'tools'): ns3::Gnuplot2dDataset::Gnuplot2dDataset(std::string const & title="Untitled") [constructor]
cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::Add(double x, double y) [member function]
cls.add_method('Add',
'void',
[param('double', 'x'), param('double', 'y')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::Add(double x, double y, double errorDelta) [member function]
cls.add_method('Add',
'void',
[param('double', 'x'), param('double', 'y'), param('double', 'errorDelta')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::Add(double x, double y, double xErrorDelta, double yErrorDelta) [member function]
cls.add_method('Add',
'void',
[param('double', 'x'), param('double', 'y'), param('double', 'xErrorDelta'), param('double', 'yErrorDelta')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::AddEmptyLine() [member function]
cls.add_method('AddEmptyLine',
'void',
[])
## gnuplot.h (module 'tools'): static void ns3::Gnuplot2dDataset::SetDefaultErrorBars(ns3::Gnuplot2dDataset::ErrorBars errorBars) [member function]
cls.add_method('SetDefaultErrorBars',
'void',
[param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')],
is_static=True)
## gnuplot.h (module 'tools'): static void ns3::Gnuplot2dDataset::SetDefaultStyle(ns3::Gnuplot2dDataset::Style style) [member function]
cls.add_method('SetDefaultStyle',
'void',
[param('ns3::Gnuplot2dDataset::Style', 'style')],
is_static=True)
## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::SetErrorBars(ns3::Gnuplot2dDataset::ErrorBars errorBars) [member function]
cls.add_method('SetErrorBars',
'void',
[param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot2dDataset::SetStyle(ns3::Gnuplot2dDataset::Style style) [member function]
cls.add_method('SetStyle',
'void',
[param('ns3::Gnuplot2dDataset::Style', 'style')])
return
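# Gnuplot2dDataset collects (x, y) samples for a 2-D plot. Sketch; the style enum
# value comes from the C++ header and is assumed to be exposed on the class:
#
#   data = ns3.Gnuplot2dDataset("flow 1")
#   data.SetStyle(ns3.Gnuplot2dDataset.LINES_POINTS)
#   for x, y in samples:
#       data.Add(x, y)
#   plot.AddDataset(data)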
def register_Ns3Gnuplot2dFunction_methods(root_module, cls):
## gnuplot.h (module 'tools'): ns3::Gnuplot2dFunction::Gnuplot2dFunction(ns3::Gnuplot2dFunction const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Gnuplot2dFunction const &', 'arg0')])
## gnuplot.h (module 'tools'): ns3::Gnuplot2dFunction::Gnuplot2dFunction(std::string const & title="Untitled", std::string const & function="") [constructor]
cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"'), param('std::string const &', 'function', default_value='""')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot2dFunction::SetFunction(std::string const & function) [member function]
cls.add_method('SetFunction',
'void',
[param('std::string const &', 'function')])
return
def register_Ns3Gnuplot3dDataset_methods(root_module, cls):
## gnuplot.h (module 'tools'): ns3::Gnuplot3dDataset::Gnuplot3dDataset(ns3::Gnuplot3dDataset const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Gnuplot3dDataset const &', 'arg0')])
## gnuplot.h (module 'tools'): ns3::Gnuplot3dDataset::Gnuplot3dDataset(std::string const & title="Untitled") [constructor]
cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot3dDataset::Add(double x, double y, double z) [member function]
cls.add_method('Add',
'void',
[param('double', 'x'), param('double', 'y'), param('double', 'z')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot3dDataset::AddEmptyLine() [member function]
cls.add_method('AddEmptyLine',
'void',
[])
## gnuplot.h (module 'tools'): static void ns3::Gnuplot3dDataset::SetDefaultStyle(std::string const & style) [member function]
cls.add_method('SetDefaultStyle',
'void',
[param('std::string const &', 'style')],
is_static=True)
## gnuplot.h (module 'tools'): void ns3::Gnuplot3dDataset::SetStyle(std::string const & style) [member function]
cls.add_method('SetStyle',
'void',
[param('std::string const &', 'style')])
return
def register_Ns3Gnuplot3dFunction_methods(root_module, cls):
## gnuplot.h (module 'tools'): ns3::Gnuplot3dFunction::Gnuplot3dFunction(ns3::Gnuplot3dFunction const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Gnuplot3dFunction const &', 'arg0')])
## gnuplot.h (module 'tools'): ns3::Gnuplot3dFunction::Gnuplot3dFunction(std::string const & title="Untitled", std::string const & function="") [constructor]
cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"'), param('std::string const &', 'function', default_value='""')])
## gnuplot.h (module 'tools'): void ns3::Gnuplot3dFunction::SetFunction(std::string const & function) [member function]
cls.add_method('SetFunction',
'void',
[param('std::string const &', 'function')])
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Start() [member function]
cls.add_method('Start',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & value) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'value')])
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & from, ns3::Time::Unit timeUnit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'from'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit timeUnit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'timeUnit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit timeUnit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'timeUnit')],
is_const=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [copy constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint8_t const * ns3::Packet::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
deprecated=True, is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> arg0) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'arg0')])
return
def register_Ns3TimeChecker_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeChecker::TimeChecker(ns3::TimeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeChecker const &', 'arg0')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
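# Generation entry point: module_init() builds the pybindgen root module, the register_* functions
# above populate it with the wrapped types, methods and free functions, and root_module.generate()
# writes the resulting C++ binding code to the FileCodeSink wrapping sys.stdout.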
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
|
ucsc-mus-strain-cactus/Comparative-Annotation-Toolkit
|
refs/heads/master
|
cat/consensus.py
|
1
|
"""
Generates consensus gene set.
This module takes as input the genePreds produced by transMap, AugustusTM(R) and AugustusCGP and generates a consensus
of these, producing a filtered gene set.
This process relies on a combination of metrics and evaluations loaded to a sqlite database by the classify module.
GFF3 tags generated in this process:
1. source_transcript: The name of the parent transcript, if it exists
2. source_gene: The name of the parent gene, if it exists
3. source_gene_common_name: The common name of the parent gene, if it is different from the source gene
4. transcript_mode: The name of the mode of operation that generated this transcript
5. transcript_class: One of possible_paralog, poor_alignment, putative_novel, putative_novel_isoform, ortholog
6. paralogy: The names of paralogous alignments
7. gene_biotype: gene biotype
8. transcript_biotype: transcript biotype
10. alternative_source_transcripts: Other possible transcripts, if this was collapsed as the result of deduplication
11. gene_alternate_contigs: Comma-separated list of other contigs on which this gene was also found
12. transcript_modes: The mode(s) that generated this transcript
"""
import collections
import luigi
import logging
import pandas as pd
import tools.intervals
import tools.misc
import tools.mathOps
import tools.fileOps
import tools.sqlInterface
import tools.transcripts
import tools.nameConversions
import tools.procOps
from tools.defaultOrderedDict import DefaultOrderedDict
logger = logging.getLogger(__name__)
id_template = '{genome:.10}_{tag_type}{unique_id:07d}'
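# e.g. genome='C57B6NJ', tag_type='T', unique_id=1 renders as 'C57B6NJ_T0000001'
# (the genome name is truncated to 10 characters and the id is zero-padded to 7 digits)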
def generate_consensus(args):
"""
Main consensus finding logic.
:param args: Argument namespace from luigi
"""
# load all genePreds
tx_dict = tools.transcripts.load_gps(args.gp_list)
# load reference annotation information
ref_df = tools.sqlInterface.load_annotation(args.ref_db_path)
ref_biotype_counts = collections.Counter(ref_df.TranscriptBiotype)
coding_count = ref_biotype_counts['protein_coding']
non_coding_count = sum(y for x, y in ref_biotype_counts.iteritems() if x != 'protein_coding')
# gene transcript map to iterate over so that we capture missing gene information
gene_biotype_map = tools.sqlInterface.get_gene_biotype_map(args.ref_db_path)
transcript_biotype_map = tools.sqlInterface.get_transcript_biotype_map(args.ref_db_path)
# load transMap evaluation data
tm_eval_df = load_transmap_evals(args.db_path)
# load the homGeneMapping data for transMap/augTM/augTMR
tx_modes = [x for x in args.tx_modes if x in ['transMap', 'augTM', 'augTMR']]
hgm_df = pd.concat([load_hgm_vectors(args.db_path, tx_mode) for tx_mode in tx_modes])
# load the alignment metrics data
mrna_metrics_df = pd.concat([load_metrics_from_db(args.db_path, tx_mode, 'mRNA') for tx_mode in tx_modes])
cds_metrics_df = pd.concat([load_metrics_from_db(args.db_path, tx_mode, 'CDS') for tx_mode in tx_modes])
eval_df = pd.concat([load_evaluations_from_db(args.db_path, tx_mode) for tx_mode in tx_modes]).reset_index()
coding_df, non_coding_df = combine_and_filter_dfs(tx_dict, hgm_df, mrna_metrics_df, cds_metrics_df, tm_eval_df,
ref_df, eval_df, args.intron_rnaseq_support,
args.exon_rnaseq_support, args.intron_annot_support,
args.exon_annot_support, args.original_intron_support,
args.in_species_rna_support_only)
if len(coding_df) + len(non_coding_df) == 0:
raise RuntimeError('No transcripts pass filtering for species {}. '
'Consider lowering requirements. Please see the manual.'.format(args.genome))
elif len(coding_df) == 0 and coding_count > 0:
logger.warning('No protein coding transcripts pass filtering for species {}. '
'Consider lowering requirements. Please see the manual.'.format(args.genome))
elif len(non_coding_df) == 0 and non_coding_count > 0:
logger.warning('No non-coding transcripts pass filtering for species {}. '
'Consider lowering requirements. Please see the manual.'.format(args.genome))
scored_coding_df, scored_non_coding_df = score_filtered_dfs(coding_df, non_coding_df,
args.in_species_rna_support_only)
scored_df = merge_scored_dfs(scored_coding_df, scored_non_coding_df)
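    # for each source transcript, keep only the alignment(s) whose score ties the per-transcript maximum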
best_alignments = scored_df.groupby('TranscriptId')['TranscriptScore'].transform(max) == scored_df['TranscriptScore']
best_df = scored_df[best_alignments].reset_index()
# store some metrics for plotting
metrics = {'Transcript Missing': collections.Counter(),
'Gene Missing': collections.Counter(),
'Transcript Modes': collections.Counter(), # coding only
'Duplicate transcripts': collections.Counter(),
'Discarded by strand resolution': 0,
'Coverage': collections.defaultdict(list),
'Identity': collections.defaultdict(list),
'Splice Support': collections.defaultdict(list),
'Exon Support': collections.defaultdict(list),
'Original Introns': collections.defaultdict(list),
'Splice Annotation Support': collections.defaultdict(list),
'Exon Annotation Support': collections.defaultdict(list),
'IsoSeq Transcript Validation': collections.Counter()}
# we can keep track of missing stuff now
for gene_biotype, tx_df in best_df.groupby('GeneBiotype'):
biotype_genes = {gene_id for gene_id, b in gene_biotype_map.iteritems() if b == gene_biotype}
metrics['Gene Missing'][gene_biotype] = len(biotype_genes) - len(set(tx_df.GeneId))
for tx_biotype, tx_df in best_df.groupby('TranscriptBiotype'):
        biotype_txs = {tx_id for tx_id, b in transcript_biotype_map.iteritems() if b == tx_biotype}
metrics['Transcript Missing'][tx_biotype] = len(biotype_txs) - len(set(tx_df.TranscriptId))
# main consensus finding -- using incorporate_tx to transform best scoring transcripts
# stores a mapping of alignment IDs to tags for the final consensus set
consensus_dict = {}
for (gene_id, tx_id), s in best_df.groupby(['GeneId', 'TranscriptId']):
aln_id, m = incorporate_tx(s, gene_id, metrics, args.hints_db_has_rnaseq)
consensus_dict[aln_id] = m
# if we ran in either denovo mode, load those data and detect novel genes
if len(args.denovo_tx_modes) > 0:
metrics['denovo'] = {}
for tx_mode in args.denovo_tx_modes:
metrics['denovo'][tx_mode] = {'Possible paralog': 0, 'Poor alignment': 0, 'Putative novel': 0,
'Possible fusion': 0, 'Putative novel isoform': 0}
denovo_dict = find_novel(args.db_path, tx_dict, consensus_dict, ref_df, metrics, gene_biotype_map,
args.denovo_num_introns, args.in_species_rna_support_only,
args.denovo_tx_modes, args.denovo_splice_support, args.denovo_exon_support)
consensus_dict.update(denovo_dict)
# perform final filtering steps
deduplicated_consensus = deduplicate_consensus(consensus_dict, tx_dict, metrics)
deduplicated_strand_resolved_consensus = resolve_opposite_strand(deduplicated_consensus, tx_dict, metrics)
if 'augPB' in args.denovo_tx_modes:
deduplicated_strand_resolved_consensus = validate_pacbio_splices(deduplicated_strand_resolved_consensus,
args.db_path, tx_dict, metrics,
args.require_pacbio_support)
gene_resolved_consensus = resolve_overlapping_cds_intervals(deduplicated_strand_resolved_consensus, tx_dict)
# sort by genomic interval for prettily increasing numbers
final_consensus = sorted(gene_resolved_consensus,
key=lambda (tx, attrs): (tx_dict[tx].chromosome, tx_dict[tx].start))
# calculate final gene set completeness
calculate_completeness(final_consensus, metrics)
# add some interesting metrics on how much using Augustus modes improved our results
    if 'augTM' in tx_modes or 'augTMR' in tx_modes:
calculate_improvement_metrics(final_consensus, scored_df, tm_eval_df, hgm_df, metrics)
calculate_indel_metrics(final_consensus, eval_df, metrics)
# write out results. consensus tx dict has the unique names
consensus_gene_dict = write_consensus_gps(args.consensus_gp, args.consensus_gp_info,
final_consensus, tx_dict, args.genome)
write_consensus_gff3(consensus_gene_dict, args.consensus_gff3)
return metrics
def load_transmap_evals(db_path):
"""
Loads the database tables associated with both transMap filtering and transMap evaluation, merging them. Keep only
the columns related to paralog resolution and classification.
"""
# load transMap results
tm_eval = tools.sqlInterface.load_alignment_evaluation(db_path)
# load transMap filtering results
tm_filter_eval = tools.sqlInterface.load_filter_evaluation(db_path)
# combine transMap evaluation and transMap filtering into one table
# the transMap filtering columns are used for tags in the output
tm_eval_df = pd.merge(tm_eval, tm_filter_eval, on=['TranscriptId', 'AlignmentId'])
return tm_eval_df.drop('AlignmentId', axis=1)
def calculate_vector_support(s, resolve_nan=None, num_digits=4):
"""For vectors parsed by parse_text_vector(), convert to a percentage between 0 and 100"""
return 100 * tools.mathOps.format_ratio(len([x for x in s if x > 0]), len(s), resolve_nan=resolve_nan,
num_digits=num_digits)
def load_hgm_vectors(db_path, tx_mode):
"""
Loads the intron vector table output by the homGeneMapping module. Returns a DataFrame with the parsed vectors
as well as the combined score based on tx_mode -- for augCGP we look at the CDS annotation score rather than the
exon score because CGP has a coding-only model.
"""
session = tools.sqlInterface.start_session(db_path)
intron_table = tools.sqlInterface.tables['hgm'][tx_mode]
hgm_df = tools.sqlInterface.load_intron_vector(intron_table, session)
# start calculating support levels for consensus finding
cols = ['IntronAnnotSupport', 'ExonAnnotSupport', 'CdsAnnotSupport',
'ExonRnaSupport', 'IntronRnaSupport',
'AllSpeciesExonRnaSupport', 'AllSpeciesIntronRnaSupport']
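    # each of these columns holds a comma-separated support vector, e.g. '2,0,1' -> [2, 0, 1];
    # an empty string parses to an empty list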
for col in cols:
hgm_df[col] = [list(map(int, x)) if len(x[0]) > 0 else [] for x in hgm_df[col].str.split(',').tolist()]
hgm_df[col + 'Percent'] = hgm_df[col].apply(calculate_vector_support, resolve_nan=1)
return hgm_df
def load_metrics_from_db(db_path, tx_mode, aln_mode):
"""
Loads the alignment metrics for the mRNA/CDS alignments of transMap/AugustusTM/TMR
"""
session = tools.sqlInterface.start_session(db_path)
metrics_table = tools.sqlInterface.tables[aln_mode][tx_mode]['metrics']
metrics_df = tools.sqlInterface.load_metrics(metrics_table, session)
# unstack flattens the long-form data structure
metrics_df = metrics_df.set_index(['AlignmentId', 'classifier']).unstack('classifier')
metrics_df.columns = [col[1] for col in metrics_df.columns]
metrics_df = metrics_df.reset_index()
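    # the frame is now wide: one row per AlignmentId with one column per classifier
    # (e.g. AlnCoverage, AlnIdentity, OriginalIntrons)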
cols = ['AlnCoverage', 'AlnGoodness', 'AlnIdentity', 'PercentUnknownBases']
metrics_df[cols] = metrics_df[cols].apply(pd.to_numeric)
metrics_df['OriginalIntrons'] = metrics_df['OriginalIntrons'].fillna('')
metrics_df['OriginalIntrons'] = [list(map(int, x)) if len(x[0]) > 0 else [] for x in
metrics_df['OriginalIntrons'].str.split(',').tolist()]
metrics_df['OriginalIntronsPercent'] = metrics_df['OriginalIntrons'].apply(calculate_vector_support, resolve_nan=1)
session.close()
return metrics_df
def load_evaluations_from_db(db_path, tx_mode):
"""
Loads the indel information from the evaluation database. We give preference to CDS alignments, but fall back
to mRNA alignments.
"""
def aggfunc(s):
"""
Preferentially pick CDS stats over mRNA stats, if they exist
They only exist for coding transcripts, and only those whose CDS alignments didn't fail
"""
if s.value_CDS.any():
c = set(s[s.value_CDS > 0].name)
else:
c = set(s[s.value_mRNA > 0].name)
cols = ['Frameshift', 'CodingInsertion', 'CodingDeletion', 'CodingMult3Indel']
return pd.Series(('CodingDeletion' in c or 'CodingInsertion' in c,
'CodingInsertion' in c, 'CodingDeletion' in c,
'CodingMult3Deletion' in c or 'CodingMult3Insertion' in c), index=cols)
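    # e.g. an alignment whose only positive CDS classifier is CodingInsertion yields
    # Frameshift=True, CodingInsertion=True, CodingDeletion=False, CodingMult3Indel=False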
session = tools.sqlInterface.start_session(db_path)
cds_table = tools.sqlInterface.tables['CDS'][tx_mode]['evaluation']
mrna_table = tools.sqlInterface.tables['mRNA'][tx_mode]['evaluation']
cds_df = tools.sqlInterface.load_evaluation(cds_table, session)
mrna_df = tools.sqlInterface.load_evaluation(mrna_table, session)
cds_df = cds_df.set_index('AlignmentId')
mrna_df = mrna_df.set_index('AlignmentId')
merged = mrna_df.reset_index().merge(cds_df.reset_index(), how='outer', on=['AlignmentId', 'name'],
suffixes=['_mRNA', '_CDS'])
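    # the outer merge keeps one row per (AlignmentId, name) pair with value_mRNA/value_CDS columns;
    # aggfunc then collapses these to a single boolean row per alignment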
eval_df = merged.groupby('AlignmentId').apply(aggfunc)
return eval_df
def load_alt_names(db_path, denovo_tx_modes):
"""Load the alternative tx tables for augCGP/augPB"""
session = tools.sqlInterface.start_session(db_path)
r = []
for tx_mode in denovo_tx_modes:
table = tools.sqlInterface.AugCgpAlternativeGenes if tx_mode == 'augCGP' else tools.sqlInterface.AugPbAlternativeGenes
r.append(tools.sqlInterface.load_alternatives(table, session))
df = pd.concat(r)
    # rename TranscriptId to AlignmentId so this table matches the gene -> tx -> alignment ID
    # hierarchy inherited from homGeneMapping
df.columns = [x if x != 'TranscriptId' else 'AlignmentId' for x in df.columns]
return df
def combine_and_filter_dfs(tx_dict, hgm_df, mrna_metrics_df, cds_metrics_df, tm_eval_df, ref_df, eval_df,
intron_rnaseq_support, exon_rnaseq_support, intron_annot_support, exon_annot_support,
original_intron_support, in_species_rna_support_only):
"""
Updates the DataFrame based on support levels. Filters based on user-tunable flags for support levels.
:param tx_dict: dictionary of genePredTranscript objects. Used to remove things filtered out by transMap
:param hgm_df: df produced by load_hgm_vectors() (all transcripts)
    :param mrna_metrics_df: df produced by load_metrics_from_db() (coding transcripts only) for mRNA alignments
    :param cds_metrics_df: df produced by load_metrics_from_db() (coding transcripts only) for CDS alignments
:param tm_eval_df: df produced by load_transmap_evals() (all transcripts)
:param ref_df: df produced by tools.sqlInterface.load_annotation()
:param eval_df: produced by load_evaluations_from_db() (coding transcripts only)
:param intron_rnaseq_support: Value 0-100. Percent of introns that must be supported by RNAseq
:param exon_rnaseq_support: Value 0-100. Percent of exons supported by RNA-seq.
:param intron_annot_support: Value 0-100. Percent of introns supported by the reference.
:param exon_annot_support: Value 0-100. Percent of exons supported by the reference.
:param original_intron_support: Value 0-100. Percent of introns that must be supported by this specific annotation.
:param in_species_rna_support_only: Should we use the homGeneMapping vectors within-species or all-species?
:return: filtered and merged dataframe
"""
# add the reference information to gain biotype information
hgm_ref_df = pd.merge(hgm_df, ref_df, on=['GeneId', 'TranscriptId'])
# combine in homGeneMapping results
hgm_ref_tm_df = pd.merge(hgm_ref_df, tm_eval_df, on=['GeneId', 'TranscriptId'])
# remove filtered transMap
hgm_ref_tm_df = hgm_ref_tm_df[hgm_ref_tm_df.AlignmentId.isin(tx_dict.viewkeys())]
# split merged_df into coding and noncoding
coding_df = hgm_ref_tm_df[hgm_ref_tm_df.TranscriptBiotype == 'protein_coding']
non_coding_df = hgm_ref_tm_df[hgm_ref_tm_df.TranscriptBiotype != 'protein_coding']
# add metrics information to coding df
metrics_df = pd.merge(mrna_metrics_df, cds_metrics_df, on='AlignmentId', suffixes=['_mRNA', '_CDS'])
coding_df = pd.merge(coding_df, metrics_df, on='AlignmentId')
# add evaluation information to coding df, where possible. This adds information on frame shifts.
coding_df = pd.merge(coding_df, eval_df, on='AlignmentId', how='left')
# fill the original intron values to 100 so we don't filter them out -- means a no-intron gene
coding_df['OriginalIntronsPercent_mRNA'] = coding_df.OriginalIntronsPercent_mRNA.fillna(100)
coding_df['OriginalIntronsPercent_CDS'] = coding_df.OriginalIntronsPercent_CDS.fillna(100)
non_coding_df['TransMapOriginalIntronsPercent'] = non_coding_df.TransMapOriginalIntronsPercent.fillna(100)
# huge ugly filtering expression for coding transcripts
if in_species_rna_support_only is True:
filt = ((coding_df.OriginalIntronsPercent_mRNA >= original_intron_support) &
(coding_df.IntronAnnotSupportPercent >= intron_annot_support) &
(coding_df.IntronRnaSupportPercent >= intron_rnaseq_support) &
(coding_df.ExonAnnotSupportPercent >= exon_annot_support) &
(coding_df.ExonRnaSupportPercent >= exon_rnaseq_support))
else:
filt = ((coding_df.OriginalIntronsPercent_mRNA >= original_intron_support) &
(coding_df.IntronAnnotSupportPercent >= intron_annot_support) &
(coding_df.AllSpeciesIntronRnaSupportPercent >= intron_rnaseq_support) &
(coding_df.ExonAnnotSupportPercent >= exon_annot_support) &
(coding_df.AllSpeciesExonRnaSupportPercent >= exon_rnaseq_support))
coding_df = coding_df[filt]
# huge ugly filtering expression for non coding transcripts
if in_species_rna_support_only is True:
filt = ((non_coding_df.TransMapOriginalIntronsPercent >= original_intron_support) &
(non_coding_df.IntronAnnotSupportPercent >= intron_annot_support) &
(non_coding_df.IntronRnaSupportPercent >= intron_rnaseq_support) &
(non_coding_df.ExonAnnotSupportPercent >= exon_annot_support) &
(non_coding_df.ExonRnaSupportPercent >= exon_rnaseq_support))
else:
filt = ((non_coding_df.TransMapOriginalIntronsPercent >= original_intron_support) &
(non_coding_df.IntronAnnotSupportPercent >= intron_annot_support) &
(non_coding_df.AllSpeciesIntronRnaSupportPercent >= intron_rnaseq_support) &
(non_coding_df.ExonAnnotSupportPercent >= exon_annot_support) &
(non_coding_df.AllSpeciesExonRnaSupportPercent >= exon_rnaseq_support))
non_coding_df = non_coding_df[filt]
return coding_df, non_coding_df
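# A call sketch for the filter above (illustrative threshold values only):
#     coding_df, non_coding_df = combine_and_filter_dfs(
#         tx_dict, hgm_df, mrna_metrics_df, cds_metrics_df, tm_eval_df, ref_df, eval_df,
#         intron_rnaseq_support=0, exon_rnaseq_support=0,
#         intron_annot_support=0, exon_annot_support=0,
#         original_intron_support=50, in_species_rna_support_only=True)
# With in_species_rna_support_only=True the IntronRnaSupportPercent/ExonRnaSupportPercent
# columns are compared against the thresholds; otherwise the AllSpecies* columns are used.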
def score_filtered_dfs(coding_df, non_coding_df, in_species_rna_support_only):
"""
Scores the alignments. The score is the additive combination of the following features:
1) Alignment identity.
2) Alignment coverage.
3) Intron annotation support.
4) Exon annotation support.
5) Original intron support.
If we have RNA-seq data, the following fields are also incorporated:
6) Intron RNA-seq support.
7) Exon RNA-seq support.
In some future world these features could be combined differently, maybe with some fancy machine learning.
Returns the dataframe sorted by scores after indexing.
"""
def score(s):
aln_id = s.AlnIdentity_CDS if s.TranscriptBiotype == 'protein_coding' else s.TransMapIdentity
aln_cov = s.AlnCoverage_CDS if s.TranscriptBiotype == 'protein_coding' else s.TransMapCoverage
orig_intron = s.OriginalIntronsPercent_mRNA if s.TranscriptBiotype == 'protein_coding' else s.TransMapOriginalIntronsPercent
if in_species_rna_support_only:
rna_support = s.ExonRnaSupportPercent + s.IntronRnaSupportPercent
else:
rna_support = s.AllSpeciesExonRnaSupportPercent + s.AllSpeciesIntronRnaSupportPercent
return aln_id + aln_cov + s.IntronAnnotSupportPercent + s.ExonAnnotSupportPercent + orig_intron + rna_support
for df in [coding_df, non_coding_df]:
if len(df) > 0:
df['TranscriptScore'] = df.apply(score, axis=1)
return coding_df, non_coding_df
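# Worked example of the additive score above (illustrative numbers, not from a real run):
# a protein_coding transcript with AlnIdentity_CDS=99.0, AlnCoverage_CDS=100.0,
# IntronAnnotSupportPercent=100.0, ExonAnnotSupportPercent=95.0,
# OriginalIntronsPercent_mRNA=100.0, ExonRnaSupportPercent=90.0 and
# IntronRnaSupportPercent=100.0 scores
#     99.0 + 100.0 + 100.0 + 95.0 + 100.0 + (90.0 + 100.0) = 684.0
# out of a possible 700 when RNA-seq support is counted.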
def merge_scored_dfs(scored_coding_df, scored_non_coding_df):
"""Merges the scored dataframes by changing some names around"""
# for every non-coding TransMap metric, copy it to the other name
for m in ['Coverage', 'Identity', 'Goodness']:
scored_non_coding_df['Aln' + m + '_mRNA'] = scored_non_coding_df['TransMap' + m]
merged_df = pd.concat([scored_non_coding_df, scored_coding_df])
return merged_df
def validate_pacbio_splices(deduplicated_strand_resolved_consensus, db_path, tx_dict, metrics, require_pacbio_support):
"""
Tag transcripts as having PacBio support.
    If the user passed --require-pacbio-support, remove any transcript which does not have such support.
"""
iso_txs = tools.sqlInterface.load_isoseq_txs(db_path)
tx_ids, _ = zip(*deduplicated_strand_resolved_consensus)
txs = [tx_dict[tx_id] for tx_id in tx_ids]
clustered = tools.transcripts.cluster_txs(txs + iso_txs)
divided_clusters = tools.transcripts.divide_clusters(clustered, tx_ids)
subset_matches = tools.transcripts.calculate_subset_matches(divided_clusters)
# invert the subset_matches to extract all validated tx_ids
validated_ids = set()
for tx_list in subset_matches.itervalues():
for tx in tx_list:
validated_ids.add(tx.name)
# begin resolving
pb_resolved_consensus = []
for tx_id, d in deduplicated_strand_resolved_consensus:
if tx_id in validated_ids:
d['pacbio_isoform_supported'] = True
metrics['IsoSeq Transcript Validation'][True] += 1
pb_resolved_consensus.append([tx_id, d])
elif require_pacbio_support is False:
d['pacbio_isoform_supported'] = False
metrics['IsoSeq Transcript Validation'][False] += 1
pb_resolved_consensus.append([tx_id, d])
# if require_pacbio_support is True, then we don't save this transcript
return pb_resolved_consensus
def incorporate_tx(best_rows, gene_id, metrics, hints_db_has_rnaseq):
"""incorporate a transcript into the consensus set, storing metrics."""
best_series = best_rows.iloc[0]
transcript_modes = evaluate_ties(best_rows)
# construct the tags for this transcript
d = {'source_transcript': best_series.TranscriptId,
'source_transcript_name': best_series.TranscriptName,
'source_gene': gene_id,
'score': int(10 * round(best_series.AlnGoodness_mRNA, 3)),
'transcript_modes': transcript_modes,
'gene_biotype': best_series.GeneBiotype,
'transcript_biotype': best_series.TranscriptBiotype,
'alignment_id': str(best_series.AlignmentId),
'frameshift': str(best_series.get('Frameshift', None)),
'exon_annotation_support': ','.join(map(str, best_series.ExonAnnotSupport)),
'intron_annotation_support': ','.join(map(str, best_series.IntronAnnotSupport)),
'transcript_class': 'ortholog',
'valid_start': bool(best_series.ValidStart),
'valid_stop': bool(best_series.ValidStop),
'adj_start': best_series.AdjStart_mRNA,
'adj_stop': best_series.AdjStop_mRNA,
'proper_orf': bool(best_series.ProperOrf)}
if hints_db_has_rnaseq is True:
d['exon_rna_support'] = ','.join(map(str, best_series.ExonRnaSupport))
d['intron_rna_support'] = ','.join(map(str, best_series.IntronRnaSupport))
if best_series.Paralogy is not None:
d['paralogy'] = best_series.Paralogy
if best_series.GeneAlternateLoci is not None:
d['gene_alternate_contigs'] = best_series.GeneAlternateLoci
if best_series.CollapsedGeneIds is not None:
d['collapsed_gene_ids'] = best_series.CollapsedGeneIds
if best_series.CollapsedGeneNames is not None:
d['collapsed_gene_names'] = best_series.CollapsedGeneNames
if best_series.PossibleSplitGeneLocations is not None:
d['possible_split_gene_locations'] = best_series.PossibleSplitGeneLocations
if best_series.GeneName is not None:
d['source_gene_common_name'] = best_series.GeneName
# add information to the overall metrics
if best_series.TranscriptBiotype == 'protein_coding':
metrics['Transcript Modes'][transcript_modes] += 1
metrics['Coverage'][best_series.TranscriptBiotype].append(best_series.AlnCoverage_mRNA)
metrics['Identity'][best_series.TranscriptBiotype].append(best_series.AlnIdentity_mRNA)
metrics['Splice Support'][best_series.TranscriptBiotype].append(best_series.IntronRnaSupportPercent)
metrics['Exon Support'][best_series.TranscriptBiotype].append(best_series.ExonRnaSupportPercent)
metrics['Splice Annotation Support'][best_series.TranscriptBiotype].append(best_series.IntronAnnotSupportPercent)
metrics['Exon Annotation Support'][best_series.TranscriptBiotype].append(best_series.ExonAnnotSupportPercent)
metrics['Original Introns'][best_series.TranscriptBiotype].append(best_series.OriginalIntronsPercent_mRNA)
return best_series.AlignmentId, d
def evaluate_ties(best_rows):
"""Find out how many transcript modes agreed on this"""
return ','.join(sorted(set([tools.nameConversions.alignment_type(x) for x in best_rows.AlignmentId])))
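# For example, if the tied best rows came from a transMap alignment and an AugustusTM
# alignment, evaluate_ties() returns 'augTM,transMap' -- assuming alignment_type()
# reports the mode names in that form; the exact strings come from tools.nameConversions.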
def find_novel(db_path, tx_dict, consensus_dict, ref_df, metrics, gene_biotype_map, denovo_num_introns,
in_species_rna_support_only, denovo_tx_modes, denovo_splice_support, denovo_exon_support):
"""
    Finds novel loci and builds their attributes. Only calls novel loci if they have sufficient splice and exon support,
    as defined by the user.
    Putative novel loci can fall into four categories:
    1) PossibleParalog -- the transcript has no assigned genes, but has alternative genes
    2) PoorMapping -- the transcript has no assigned or alternative genes but has exons/introns supported by annotation
    3) PutativeNovel -- the transcript has no matches to the reference
    4) PossibleFusion -- the transcript was flagged in parent finding as a fusion, and has all valid splices
Also finds novel splice junctions in CGP/PB transcripts. A novel splice junction is defined as a splice which
homGeneMapping did not map over and which is supported by RNA-seq.
"""
def is_novel(s):
"""
Determine if this transcript is possibly novel. If it is assigned a gene ID, pass this off to
is_novel_supported()
"""
if s.AssignedGeneId is not None:
return is_novel_supported(s)
if s.ResolutionMethod == 'badAnnotOrTm':
return None
elif s.ResolutionMethod == 'ambiguousOrFusion' and s.IntronRnaSupportPercent != 100:
return None
# validate the support level
intron = s.IntronRnaSupportPercent if in_species_rna_support_only else s.AllSpeciesIntronRnaSupportPercent
exon = s.ExonRnaSupportPercent if in_species_rna_support_only else s.AllSpeciesExonRnaSupportPercent
if not (intron >= denovo_splice_support and exon >= denovo_exon_support and len(s.IntronRnaSupport) > 0):
return None
# if we previously flagged this as ambiguousOrFusion, propagate this tag
if s.ResolutionMethod == 'ambiguousOrFusion':
return 'possible_fusion'
# if we have alternatives, this is not novel but could be a gene family expansion
elif s.AlternativeGeneIds is not None:
return 'possible_paralog'
# this may be a poor mapping
elif bool(s.ExonAnnotSupportPercent > 0 or s.CdsAnnotSupportPercent > 0 or s.IntronAnnotSupportPercent > 0):
return 'poor_alignment'
# this is looking pretty novel, could still be a mapping problem in a complex region though
else:
return 'putative_novel'
def is_novel_supported(s):
"""Is this CGP/PB transcript with an assigned gene ID supported and have a novel splice?"""
denovo_tx_obj = tx_dict[s.AlignmentId]
if len(denovo_tx_obj.intron_intervals) < denovo_num_introns:
return None
        elif in_species_rna_support_only and (s.ExonRnaSupportPercent <= denovo_exon_support or
                                              s.IntronRnaSupportPercent <= denovo_splice_support):
            return None
        elif in_species_rna_support_only is False and (s.AllSpeciesExonRnaSupportPercent <= denovo_exon_support or
                                                       s.AllSpeciesIntronRnaSupportPercent <= denovo_splice_support):
            return None
new_supported_splices = set()
intron_vector = s.IntronRnaSupport if in_species_rna_support_only else s.AllSpeciesIntronRnaSupport
for intron, rna in zip(*[denovo_tx_obj.intron_intervals, intron_vector]):
if rna > 0 and intron not in existing_splices:
new_supported_splices.add(intron)
if len(new_supported_splices) == 0:
return None
# if any splices are both not supported by annotation and supported by RNA, call this as novel
if any(annot == 0 and i in new_supported_splices for i, annot in zip(*[denovo_tx_obj.intron_intervals,
s.IntronAnnotSupport])):
metrics['Transcript Modes'][tx_mode] += 1
tx_class = 'putative_novel_isoform'
        # otherwise the new RNA-supported splices are already annotated, so treat this as a poor alignment
else:
tx_class = 'poor_alignment'
return tx_class
denovo_hgm_df = pd.concat([load_hgm_vectors(db_path, tx_mode) for tx_mode in denovo_tx_modes])
# remove the TranscriptId and GeneId columns so they can be populated by others
denovo_hgm_df = denovo_hgm_df.drop(['GeneId', 'TranscriptId'], axis=1)
# load the alignment metrics data
denovo_alt_names = load_alt_names(db_path, denovo_tx_modes)
denovo_df = pd.merge(denovo_hgm_df, denovo_alt_names, on='AlignmentId')
common_name_map = dict(zip(*[ref_df.GeneId, ref_df.GeneName]))
denovo_df['CommonName'] = [common_name_map.get(x, None) for x in denovo_df.AssignedGeneId]
denovo_df['GeneBiotype'] = [gene_biotype_map.get(x, None) for x in denovo_df.AssignedGeneId]
# extract all splices we have already seen
existing_splices = set()
for consensus_tx in consensus_dict:
existing_splices.update(tx_dict[consensus_tx].intron_intervals)
# apply the novel finding functions
denovo_df['TranscriptClass'] = denovo_df.apply(is_novel, axis=1)
# types of transcripts for later
denovo_df['TranscriptMode'] = [tools.nameConversions.alignment_type(aln_id) for aln_id in denovo_df.AlignmentId]
    # filter out non-novel transcripts as well as possible fusions
filtered_denovo_df = denovo_df[~denovo_df.TranscriptClass.isnull()]
filtered_denovo_df = filtered_denovo_df[filtered_denovo_df.TranscriptClass != 'possible_fusion']
# fill in missing fields for novel loci
filtered_denovo_df['GeneBiotype'] = filtered_denovo_df['GeneBiotype'].fillna('unknown_likely_coding')
# construct aln_id -> features map to return
denovo_tx_dict = {}
for _, s in filtered_denovo_df.iterrows():
aln_id = s.AlignmentId
tx_mode = s.TranscriptMode
denovo_tx_dict[aln_id] = {'source_gene': s.AssignedGeneId,
'transcript_class': s.TranscriptClass,
'transcript_biotype': 'unknown_likely_coding',
'gene_biotype': s.GeneBiotype,
'intron_rna_support': ','.join(map(str, s.IntronRnaSupport)),
'exon_rna_support': ','.join(map(str, s.ExonRnaSupport)),
'transcript_modes': tx_mode,
'exon_annotation_support': ','.join(map(str, s.ExonAnnotSupport)),
'intron_annotation_support': ','.join(map(str, s.IntronAnnotSupport)),
'alignment_id': aln_id,
'source_gene_common_name': s.CommonName,
'valid_start': True,
'valid_stop': True,
'proper_orf': True}
# record some metrics
metrics['denovo'][tx_mode][s.TranscriptClass.replace('_', ' ').capitalize()] += 1
metrics['Transcript Modes'][tx_mode] += 1
metrics['Splice Support']['unknown_likely_coding'].append(s.IntronRnaSupportPercent)
metrics['Exon Support']['unknown_likely_coding'].append(s.ExonRnaSupportPercent)
# record how many of each type we threw out
for tx_mode, df in denovo_df.groupby('TranscriptMode'):
metrics['denovo'][tx_mode]['Discarded'] = len(df[df.TranscriptClass.isnull()])
return denovo_tx_dict
def deduplicate_consensus(consensus_dict, tx_dict, metrics):
"""
In the process of consensus building, we may find that we have ended up with more than one transcript for a gene
that are actually identical. Remove these, picking the best based on their score, favoring the transcript
whose biotype matches the parent.
"""
def resolve_duplicate(tx_list, consensus_dict):
biotype_txs = [tx for tx in tx_list if
consensus_dict[tx].get('gene_biotype', None) == consensus_dict[tx].get('transcript_biotype',
None)]
if len(biotype_txs) > 0:
tx_list = biotype_txs
sorted_scores = sorted([[tx, consensus_dict[tx].get('score', 0)] for tx in tx_list],
key=lambda (tx, s): -s)
return sorted_scores[0][0]
def add_duplicate_field(best_tx, tx_list, consensus_dict, deduplicated_consensus):
deduplicated_consensus[best_tx] = consensus_dict[best_tx]
tx_list = [tools.nameConversions.strip_alignment_numbers(aln_id) for aln_id in tx_list]
best_tx_base = tools.nameConversions.strip_alignment_numbers(best_tx)
deduplicated_consensus[best_tx]['alternative_source_transcripts'] = ','.join(set(tx_list) - {best_tx_base})
# build a dictionary mapping duplicates making use of hashing intervals
duplicates = collections.defaultdict(list)
for aln_id in consensus_dict:
tx = tx_dict[aln_id]
duplicates[frozenset(tx.exon_intervals)].append(aln_id)
# begin iterating
deduplicated_consensus = {}
for tx_list in duplicates.itervalues():
if len(tx_list) > 1:
metrics['Duplicate transcripts'][len(tx_list)] += 1
best_tx = resolve_duplicate(tx_list, consensus_dict)
add_duplicate_field(best_tx, tx_list, consensus_dict, deduplicated_consensus)
else:
tx_id = tx_list[0]
deduplicated_consensus[tx_id] = consensus_dict[tx_id]
return deduplicated_consensus
def resolve_opposite_strand(deduplicated_consensus, tx_dict, metrics):
"""
Resolves situations where multiple transcripts of the same gene are on opposite strands. Does so by looking for
the largest sum of scores.
"""
gene_dict = collections.defaultdict(list)
for tx_id, attrs in deduplicated_consensus.iteritems():
tx_obj = tx_dict[tx_id]
gene_dict[tx_obj.name2].append([tx_obj, attrs])
deduplicated_strand_resolved_consensus = []
for gene in gene_dict:
tx_objs, attrs = zip(*gene_dict[gene])
if len(set(tx_obj.strand for tx_obj in tx_objs)) > 1:
strand_scores = collections.Counter()
for tx_obj, attrs in gene_dict[gene]:
strand_scores[tx_obj.strand] += attrs.get('score', 0)
            best_strand = sorted(strand_scores.items(), key=lambda x: x[1])[-1][0]
for tx_obj, attrs in gene_dict[gene]:
if tx_obj.strand == best_strand:
deduplicated_strand_resolved_consensus.append([tx_obj.name, attrs])
else:
metrics['Discarded by strand resolution'] += 1
else:
deduplicated_strand_resolved_consensus.extend([[tx_obj.name, attrs] for tx_obj, attrs in gene_dict[gene]])
return deduplicated_strand_resolved_consensus
###
# This section involves final filtering for overlapping genes caused by incorporating predictions
###
def cluster_genes(gp):
tmp = tools.fileOps.get_tmp_file()
cmd = ['clusterGenes', tmp, 'no', gp, '-cds']
tools.procOps.run_proc(cmd)
return tmp
def resolve_overlapping_cds_intervals(deduplicated_strand_resolved_consensus, tx_dict):
"""
Resolves overlapping genes that are the result of integrating gene predictions
"""
# first, write genePred
attr_df = []
with tools.fileOps.TemporaryFilePath() as tmp_gp, tools.fileOps.TemporaryFilePath() as tmp_clustered:
with open(tmp_gp, 'w') as outf:
for tx_id, attrs in deduplicated_strand_resolved_consensus:
tx_obj = tx_dict[tx_id]
tools.fileOps.print_row(outf, tx_obj.get_gene_pred())
attr_df.append([tx_id, attrs['transcript_class'], attrs['gene_biotype'],
attrs.get('source_gene', tx_obj.name2), attrs.get('score', None)])
# cluster
cmd = ['clusterGenes', tmp_clustered, 'no', tmp_gp, '-cds']
tools.procOps.run_proc(cmd)
cluster_df = pd.read_csv(tmp_clustered, sep='\t')
attr_df = pd.DataFrame(attr_df, columns=['transcript_id', 'transcript_class', 'gene_biotype', 'gene_id', 'score'])
m = attr_df.merge(cluster_df, left_on='transcript_id', right_on='gene') # gene is transcript ID
to_remove = set() # list of transcript IDs to remove
for cluster_id, group in m.groupby('#cluster'):
if len(set(group['gene_id'])) > 1:
if 'unknown_likely_coding' in set(group['gene_biotype']): # pick longest ORF
orfs = {tx_id: tx_dict[tx_id].cds_size for tx_id in group['transcript_id']}
best_tx = sorted(orfs.iteritems(), key=lambda x: x[1])[-1][0]
tx_df = group[group.transcript_id == best_tx].iloc[0]
best_gene = tx_df.gene_id
else: # pick highest average score
avg_scores = group[['gene_id', 'score']].groupby('gene_id', as_index=False).mean()
best_gene = avg_scores.sort_values('score', ascending=False).iloc[0]['gene_id']
to_remove.update(set(group[group.gene_id != best_gene].transcript_id))
return [[tx_id, attrs] for tx_id, attrs in deduplicated_strand_resolved_consensus if tx_id not in to_remove]
###
# Metrics calculations on final set
###
def calculate_completeness(final_consensus, metrics):
"""calculates final completeness to make arithmetic easier"""
genes = collections.defaultdict(set)
txs = collections.Counter()
for aln_id, c in final_consensus:
# don't count novel transcripts towards completeness
if tools.nameConversions.aln_id_is_cgp(aln_id) or tools.nameConversions.aln_id_is_pb(aln_id):
continue
genes[c['gene_biotype']].add(c['source_gene'])
txs[c['transcript_biotype']] += 1
genes = {biotype: len(gene_list) for biotype, gene_list in genes.iteritems()}
metrics['Completeness'] = {'Gene': genes, 'Transcript': txs}
def calculate_improvement_metrics(final_consensus, scored_df, tm_eval_df, hgm_df, metrics):
"""For coding transcripts, how much did we improve the metrics?"""
tm_df = tm_eval_df.reset_index()[['TransMapOriginalIntronsPercent', 'TranscriptId']]
hgm_df_subset = hgm_df[hgm_df['AlignmentId'].apply(tools.nameConversions.aln_id_is_transmap)]
hgm_df_subset = hgm_df_subset[['TranscriptId', 'IntronAnnotSupportPercent', 'IntronRnaSupportPercent']]
tm_df = pd.merge(tm_df, hgm_df_subset, on='TranscriptId')
df = pd.merge(tm_df, scored_df.reset_index(), on='TranscriptId', suffixes=['TransMap', ''])
df = df.drop_duplicates(subset='AlignmentId') # why do I need to do this?
df = df.set_index('AlignmentId')
metrics['Evaluation Improvement'] = {'changes': [], 'unchanged': 0}
for aln_id, c in final_consensus:
if c['transcript_biotype'] != 'protein_coding':
continue
if 'transMap' in c['transcript_modes']:
metrics['Evaluation Improvement']['unchanged'] += 1
continue
tx_s = df.ix[aln_id]
metrics['Evaluation Improvement']['changes'].append([tx_s.TransMapOriginalIntronsPercent,
tx_s.IntronAnnotSupportPercentTransMap,
tx_s.IntronRnaSupportPercentTransMap,
tx_s.OriginalIntronsPercent_mRNA,
tx_s.IntronAnnotSupportPercent,
tx_s.IntronRnaSupportPercent,
tx_s.TransMapGoodness,
tx_s.AlnGoodness_mRNA])
def calculate_indel_metrics(final_consensus, eval_df, metrics):
"""How many transcripts in the final consensus have indels? How many did we have in transMap?"""
if len(eval_df) == 0: # edge case where no transcripts hit indel filters
metrics['transMap Indels'] = {}
        metrics['Consensus Indels'] = {}
return
eval_df_transmap = eval_df[eval_df['AlignmentId'].apply(tools.nameConversions.aln_id_is_transmap)]
tm_vals = eval_df_transmap.set_index('AlignmentId').sum(axis=0)
tm_vals = 100.0 * tm_vals / len(set(eval_df_transmap.index))
metrics['transMap Indels'] = tm_vals.to_dict()
consensus_ids = set(zip(*final_consensus)[0])
consensus_vals = eval_df[eval_df['AlignmentId'].isin(consensus_ids)].set_index('AlignmentId').sum(axis=0)
consensus_vals = 100.0 * consensus_vals / len(final_consensus)
metrics['Consensus Indels'] = consensus_vals.to_dict()
###
# Writing output genePred and GFF3
###
def write_consensus_gps(consensus_gp, consensus_gp_info, final_consensus, tx_dict, genome):
"""
Write the resulting gp + gp_info, generating genome-specific unique identifiers
"""
# keeps track of gene # -> ID mappings
genes_seen = collections.defaultdict(dict)
gene_count = 0
consensus_gene_dict = DefaultOrderedDict(lambda: DefaultOrderedDict(list)) # used to make gff3 next
gp_infos = []
consensus_gp_target = luigi.LocalTarget(consensus_gp)
with consensus_gp_target.open('w') as out_gp:
for tx_count, (tx, attrs) in enumerate(final_consensus, 1):
attrs = attrs.copy()
tx_obj = tx_dict[tx]
name = id_template.format(genome=genome, tag_type='T', unique_id=tx_count)
score = int(round(attrs.get('score', 0)))
source_gene = attrs['source_gene']
if source_gene is None:
source_gene = tx_obj.name2
if source_gene not in genes_seen[tx_obj.chromosome]:
gene_count += 1
genes_seen[tx_obj.chromosome][source_gene] = gene_count
gene_id = genes_seen[tx_obj.chromosome][source_gene]
name2 = id_template.format(genome=genome, tag_type='G', unique_id=gene_id)
out_gp.write('\t'.join(tx_obj.get_gene_pred(name=name, name2=name2, score=score)) + '\n')
attrs['transcript_id'] = name
attrs['gene_id'] = name2
gp_infos.append(attrs)
consensus_gene_dict[tx_obj.chromosome][name2].append([tx_obj, attrs])
gp_info_df = pd.DataFrame(gp_infos)
gp_info_df = gp_info_df.set_index(['gene_id', 'transcript_id'])
# its possible alternative_source_transcripts did not end up in the final result, so add it
if 'alternative_source_transcripts' not in gp_info_df.columns:
gp_info_df['alternative_source_transcripts'] = ['N/A'] * len(gp_info_df)
with luigi.LocalTarget(consensus_gp_info).open('w') as outf:
gp_info_df.to_csv(outf, sep='\t', na_rep='N/A')
return consensus_gene_dict
def write_consensus_gff3(consensus_gene_dict, consensus_gff3):
"""
Write the consensus set in gff3 format
"""
def convert_attrs(attrs, id_field):
"""converts the attrs dict to a attributes field. assigns name to the gene common name for display"""
attrs['ID'] = id_field
if 'score' in attrs:
score = 10 * attrs['score']
del attrs['score']
else:
score = '.'
if 'source_gene_common_name' in attrs:
attrs['Name'] = attrs['source_gene_common_name']
# don't include the support vectors in the string, they will be placed in their respective places
attrs_str = ['='.join([key, str(val)]) for key, val in sorted(attrs.iteritems()) if 'support' not in key]
# explicitly escape any semicolons that may exist in the input strings
attrs_str = [x.replace(';', '%3B') for x in attrs_str]
return score, ';'.join(attrs_str)
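    # Example (hypothetical attrs): convert_attrs({'gene_biotype': 'protein_coding', 'score': 5}, 'tx1')
    # returns (50, 'ID=tx1;gene_biotype=protein_coding') -- the score is scaled by 10, keys are
    # emitted in sorted order, *support* keys are dropped and ';' is escaped as '%3B'.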
def find_feature_support(attrs, feature, i):
"""Extracts the boolean value from the comma delimited string"""
try:
            vals = [float(v) > 0 for v in attrs[feature].split(',')]  # '0' must map to False
except KeyError:
return 'N/A'
return vals[i]
def generate_gene_record(chrom, tx_objs, gene_id, attrs_list):
"""calculates the gene interval for this list of tx"""
def find_all_tx_modes(attrs_list):
tx_modes = set()
for attrs in attrs_list:
tx_modes.update(attrs['transcript_modes'].split(','))
return ','.join(tx_modes)
intervals = set()
for tx in tx_objs:
intervals.update(tx.exon_intervals)
intervals = sorted(intervals)
strand = tx_objs[0].strand
# subset the attrs to gene fields
attrs = attrs_list[0]
useful_keys = ['source_gene_common_name', 'source_gene', 'gene_biotype',
'alternative_source_transcripts', 'gene_alternate_contigs']
attrs = {key: attrs[key] for key in useful_keys if key in attrs}
attrs['transcript_modes'] = find_all_tx_modes(attrs_list)
score, attrs_field = convert_attrs(attrs, gene_id)
return [chrom, 'CAT', 'gene', intervals[0].start + 1, intervals[-1].stop, score, strand, '.', attrs_field]
def generate_transcript_record(chrom, tx_obj, attrs):
"""generates transcript records, calls generate_exon_records to generate those too"""
tx_id = tx_obj.name
attrs['Parent'] = attrs['gene_id']
score, attrs_field = convert_attrs(attrs, tx_id)
yield [chrom, 'CAT', 'transcript', tx_obj.start + 1, tx_obj.stop, score, tx_obj.strand, '.', attrs_field]
# hack to remove the frameshift field from lower objects
# TODO: record the actual exon with the frameshift.
if 'frameshift' in attrs:
del attrs['frameshift']
for line in generate_intron_exon_records(chrom, tx_obj, tx_id, attrs):
yield line
if tx_obj.cds_size > 3:
for line in generate_start_stop_codon_records(chrom, tx_obj, tx_id, attrs):
yield line
def generate_intron_exon_records(chrom, tx_obj, tx_id, attrs):
"""generates intron and exon records"""
attrs['Parent'] = tx_id
# exon records
cds_i = 0 # keep track of position of CDS in case of entirely non-coding exons
for i, (exon, exon_frame) in enumerate(zip(*[tx_obj.exon_intervals, tx_obj.exon_frames])):
attrs['rna_support'] = find_feature_support(attrs, 'exon_rna_support', i)
attrs['reference_support'] = find_feature_support(attrs, 'exon_annotation_support', i)
score, attrs_field = convert_attrs(attrs, 'exon:{}:{}'.format(tx_id, i))
yield [chrom, 'CAT', 'exon', exon.start + 1, exon.stop, score, exon.strand, '.', attrs_field]
cds_interval = exon.intersection(tx_obj.coding_interval)
if cds_interval is not None:
#attrs['reference_support'] = find_feature_support(attrs, 'cds_annotation_support', cds_i)
score, attrs_field = convert_attrs(attrs, 'CDS:{}:{}'.format(tx_id, cds_i))
cds_i += 1
yield [chrom, 'CAT', 'CDS', cds_interval.start + 1, cds_interval.stop, score, exon.strand,
tools.transcripts.convert_frame(exon_frame), attrs_field]
# intron records
for i, intron in enumerate(tx_obj.intron_intervals):
if len(intron) == 0:
continue
attrs['rna_support'] = find_feature_support(attrs, 'intron_rna_support', i)
attrs['reference_support'] = find_feature_support(attrs, 'intron_annotation_support', i)
score, attrs_field = convert_attrs(attrs, 'intron:{}:{}'.format(tx_id, i))
yield [chrom, 'CAT', 'intron', intron.start + 1, intron.stop, score, intron.strand, '.', attrs_field]
def generate_start_stop_codon_records(chrom, tx_obj, tx_id, attrs):
"""generate start/stop codon GFF3 records, handling frame appropriately"""
if attrs['valid_start'] is True:
score, attrs_field = convert_attrs(attrs, 'start_codon:{}'.format(tx_id))
for interval in tx_obj.get_start_intervals():
yield [chrom, 'CAT', 'start_codon', interval.start + 1, interval.stop, score, tx_obj.strand,
interval.data, attrs_field]
if attrs['valid_stop'] is True:
score, attrs_field = convert_attrs(attrs, 'stop_codon:{}'.format(tx_id))
for interval in tx_obj.get_stop_intervals():
yield [chrom, 'CAT', 'stop_codon', interval.start + 1, interval.stop, score, tx_obj.strand,
interval.data, attrs_field]
# main gff3 writing logic
consensus_gff3 = luigi.LocalTarget(consensus_gff3)
with consensus_gff3.open('w') as out_gff3:
out_gff3.write('##gff-version 3\n')
for chrom in sorted(consensus_gene_dict):
#out_gff3.write('###sequence-region {}\n'.format(chrom))
for gene_id, tx_list in consensus_gene_dict[chrom].iteritems():
tx_objs, attrs_list = zip(*tx_list)
tx_lines = [generate_gene_record(chrom, tx_objs, gene_id, attrs_list)]
for tx_obj, attrs in tx_list:
tx_lines.extend(list(generate_transcript_record(chrom, tx_obj, attrs)))
tx_lines = sorted(tx_lines, key=lambda l: l[3])
tools.fileOps.print_rows(out_gff3, tx_lines)
|
octoblu/alljoyn
|
refs/heads/master
|
alljoyn/services/controlpanel/cpp/tools/CPSAppGenerator/GeneratorUtils/actionWidget.py
|
3
|
# Copyright AllSeen Alliance. All rights reserved.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import xml.etree.ElementTree as xml
import commonWidget as common
import dialogWidget as dw
class Action (common.Widget):
def __init__(self, generated, actionElement, parentObjectPath, languageSetName) :
common.Widget.__init__(self, generated, actionElement, parentObjectPath, languageSetName)
if hasattr(self.element.onAction, "executeCode") :
            self.widgetName = self.name[:1].upper() + self.name[1:]
else :
self.widgetName = "ActionWithDialog"
def generate(self) :
common.Widget.generate(self)
self.generateOnAction()
def generateOnAction (self) :
onAction = self.element.onAction
if hasattr(onAction, "executeCode") :
actionHeaderFile = self.generated.actionHeaderFile
actionSrcFile = self.generated.actionSrcFile
regularName = self.widgetName
capitalName = self.name.upper()
actionHeaderFile = actionHeaderFile.replace("CAPITAL_NAME_HERE", capitalName)
actionHeaderFile = actionHeaderFile.replace("REGULAR_NAME_HERE", regularName)
actionSrcFile = actionSrcFile.replace("CAPITAL_NAME_HERE", capitalName)
actionSrcFile = actionSrcFile.replace("REGULAR_NAME_HERE", regularName)
actionSrcFile = actionSrcFile.replace("ADDITIONAL_INCLUDES_HERE", self.generated.srcIncludes)
actionSrcFile = actionSrcFile.replace("EXECUTE_ACTION_HERE", onAction.executeCode)
self.generated.headerIncludes += self.generateHeaderInclude()
genH = open(self.generated.path + "/" + regularName + ".h", 'w')
genH.write(actionHeaderFile)
genH.close()
genC = open(self.generated.path + "/" + regularName + ".cc", 'w')
genC.write(actionSrcFile)
genC.close()
elif hasattr(onAction, "dialog") :
dialog = dw.Dialog(self.generated, onAction.dialog, self.name, self.languageSetName)
dialog.generate()
|
riadnassiffe/Simulator
|
refs/heads/master
|
src/tools/ecos/cvxpy/cvxpy/interface/numpy_interface/sparse_matrix_interface.py
|
1
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.interface.numpy_interface.ndarray_interface import NDArrayInterface
import scipy.sparse as sp
import numpy as np
import numbers
import cvxopt
class SparseMatrixInterface(NDArrayInterface):
"""
An interface to convert constant values to the scipy sparse CSC class.
"""
TARGET_MATRIX = sp.csc_matrix
@NDArrayInterface.scalar_const
def const_to_matrix(self, value, convert_scalars=False):
"""Convert an arbitrary value into a matrix of type self.target_matrix.
Args:
value: The constant to be converted.
convert_scalars: Should scalars be converted?
Returns:
A matrix of type self.target_matrix or a scalar.
"""
# Convert cvxopt sparse to coo matrix.
if isinstance(value, cvxopt.spmatrix):
Vp, Vi, Vx = value.CCS
Vp, Vi = (np.fromiter(iter(x),
dtype=np.int32,
count=len(x))
for x in (Vp, Vi))
Vx = np.fromiter(iter(Vx), dtype=np.double)
m, n = value.size
return sp.csc_matrix((Vx, Vi, Vp), shape=(m, n))
if isinstance(value, list):
return sp.csc_matrix(value, dtype=np.double).T
return sp.csc_matrix(value, dtype=np.double)
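    # Example (sketch): a plain Python list is parsed as a single row and then transposed,
    # so const_to_matrix([1, 2, 3]) yields a 3x1 CSC column vector, while 2-D inputs
    # (numpy arrays, cvxopt sparse matrices) keep their shape.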
def identity(self, size):
"""Return an identity matrix.
"""
return sp.eye(size, size, format="csc")
def size(self, matrix):
"""Return the dimensions of the matrix.
"""
return matrix.shape
def scalar_value(self, matrix):
"""Get the value of the passed matrix, interpreted as a scalar.
"""
return matrix[0, 0]
def zeros(self, rows, cols):
"""Return a matrix with all 0's.
"""
return sp.csc_matrix((rows, cols), dtype='float64')
def reshape(self, matrix, size):
"""Change the shape of the matrix.
"""
matrix = matrix.todense()
matrix = super(SparseMatrixInterface, self).reshape(matrix, size)
return self.const_to_matrix(matrix, convert_scalars=True)
def block_add(self, matrix, block, vert_offset, horiz_offset, rows, cols,
vert_step=1, horiz_step=1):
"""Add the block to a slice of the matrix.
Args:
matrix: The matrix the block will be added to.
block: The matrix/scalar to be added.
vert_offset: The starting row for the matrix slice.
horiz_offset: The starting column for the matrix slice.
rows: The height of the block.
cols: The width of the block.
vert_step: The row step size for the matrix slice.
horiz_step: The column step size for the matrix slice.
"""
block = self._format_block(matrix, block, rows, cols)
slice_ = [slice(vert_offset, rows+vert_offset, vert_step),
slice(horiz_offset, horiz_offset+cols, horiz_step)]
        # Add the block into the selected slice of the matrix.
matrix[slice_[0], slice_[1]] = matrix[slice_[0], slice_[1]] + block
|
friendsoftheweb/woff-rb
|
refs/heads/master
|
woffTools/Lib/woffTools/__init__.py
|
1
|
"""
This implements the WOFF specification dated September 16, 2009.
The main object is the WOFFFont. It is a subclass for the FontTools
TTFont object, so it has very similar functionality. The WOFFReader
and WOFFWriter are also available for use outside of this module.
Those objects are much faster than WOFFFont, but they require much
more care.
"""
import zlib
import struct
from fontTools.misc import sstruct
from cStringIO import StringIO
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, debugmsg, sortedTagList
from fontTools.ttLib.sfnt import getSearchRange, calcChecksum, SFNTDirectoryEntry, \
sfntDirectoryFormat, sfntDirectorySize, sfntDirectoryEntryFormat, sfntDirectoryEntrySize
# -----------
# Main Object
# -----------
class WOFFFont(TTFont):
"""
This object represents a WOFF file. It is a subclass of
the FontTools TTFont object, so the same API applies.
For information about the arguments in __init__,
refer to the TTFont documentation.
This object has two special attributes: metadata and privateData.
The metadata attribute returns an ElementTree Element object
representing the metadata stored in the font. To set new metadata
in the font, you must use this object. The privateData attribute
returns the private data stored in the font. To set private data,
set a string to font.privateData.
"""
def __init__(self, file=None, flavor="\000\001\000\000",
checkChecksums=0, verbose=False, recalcBBoxes=True,
allowVID=False, ignoreDecompileErrors=False):
# can't use the TTFont __init__ because it goes directly to the SFNTReader.
# see that method for details about all of this.
self.verbose = verbose
self.recalcBBoxes = recalcBBoxes
self.tables = {}
self.reader = None
self.last_vid = 0xFFFE
self.reverseVIDDict = {}
self.VIDDict = {}
self.allowVID = allowVID
self.ignoreDecompileErrors = ignoreDecompileErrors
self.flavor = flavor
self.majorVersion = 0
self.minorVersion = 0
self._metadata = None
self._tableOrder = None
if file is not None:
if not hasattr(file, "read"):
file = open(file, "rb")
self.reader = WOFFReader(file, checkChecksums=checkChecksums)
self.flavor = self.reader.flavor
self.majorVersion = self.reader.majorVersion
self.minorVersion = self.reader.minorVersion
self._tableOrder = self.reader.keys()
else:
self._metadata = ElementTree.Element("metadata", version="1.0")
self.privateData = None
def __getattr__(self, attr):
if attr not in ("privateData", "metadata"):
raise AttributeError(attr)
# metadata
if attr == "metadata":
if self._metadata is not None:
return self._metadata
if self.reader is not None:
text = self.reader.metadata
if text:
metadata = ElementTree.fromstring(text)
else:
metadata = ElementTree.Element("metadata", version="1.0")
self._metadata = metadata
return self._metadata
return None
# private data
elif attr == "privateData":
if not hasattr(self, "privateData"):
privateData = None
if self.reader is not None:
privateData = self.reader.privateData
self.privateData = privateData
return self.privateData
# fallback to None
return None
def keys(self):
"""
Return a list of all tables in the font. If a table order
has been set manually or as the result of opening an existing
WOFF file, the set table order will be in the list first.
Tables not defined in an existing order will be sorted following
the suggested ordering in the OTF/OFF specification.
The first table listed in all cases is the GlyphOrder pseudo table.
"""
tags = set(self.tables.keys())
if self.reader is not None:
tags = tags | set(self.reader.keys())
tags = list(tags)
if "GlyphOrder" in tags:
tags.remove("GlyphOrder")
return ["GlyphOrder"] + sortedTagList(tags, self._tableOrder)
def setTableOrder(self, order):
"""
Set the order in which tables should be written
into the font. This is required if a DSIG table
is in the font.
"""
self._tableOrder = order
def save(self, file, compressionLevel=9, recompressTables=False, reorderTables=True, recalculateHeadChecksum=True):
"""
        Save a WOFF into a file object specified by the
        file argument. Optionally, file can be a path and a
        new file will be created at that location.
compressionLevel is the compression level to be
used with zlib. This must be an int between 1 and 9.
The default is 9, the highest compression, but slowest
compression time.
Set recompressTables to True if you want any already
compressed tables to be decompressed and then recompressed
using the level specified by compressionLevel.
If you want the tables in the WOFF reordered following
the suggested optimal table orderings described in the
        OTF/OFF specification, set reorderTables to True.
Tables cannot be reordered if a DSIG table is in the font.
If you change any of the SFNT data or reorder the tables,
the head table checkSumAdjustment must be recalculated.
If you are not changing any of the SFNT data, you can set
recalculateHeadChecksum to False to prevent the recalculation.
This must be set to False if the font contains a DSIG table.
"""
# if DSIG is to be written, the table order
# must be completely specified. otherwise the
# DSIG may not be valid after decoding the WOFF.
tags = self.keys()
if "GlyphOrder" in tags:
tags.remove("GlyphOrder")
if "DSIG" in tags:
if self._tableOrder is None or (set(self._tableOrder) != set(tags)):
raise WOFFLibError("A complete table order must be supplied when saving a font with a 'DSIG' table.")
elif reorderTables:
raise WOFFLibError("Tables can not be reordered when a 'DSIG' table is in the font. Set reorderTables to False.")
elif recalculateHeadChecksum:
raise WOFFLibError("The 'head' table checkSumAdjustment can not be recalculated when a 'DSIG' table is in the font.")
# sort the tags if necessary
if reorderTables:
tags = sortedTagList(tags)
# open a file if necessary
closeStream = False
if not hasattr(file, "write"):
closeStream = True
file = open(file, "wb")
# write the table data
if "GlyphOrder" in tags:
tags.remove("GlyphOrder")
numTables = len(tags)
writer = WOFFWriter(file, numTables, flavor=self.flavor,
majorVersion=self.majorVersion, minorVersion=self.minorVersion,
compressionLevel=compressionLevel, recalculateHeadChecksum=recalculateHeadChecksum,
verbose=self.verbose)
for tag in tags:
origData = None
origLength = None
origChecksum = None
compLength = None
# table is loaded
if self.isLoaded(tag):
origData = self.getTableData(tag)
# table is in reader
elif self.reader is not None:
if recompressTables:
origData = self.getTableData(tag)
else:
if self.verbose:
debugmsg("Reading '%s' table from disk" % tag)
origData, origLength, origChecksum, compLength = self.reader.getCompressedTableData(tag)
# add to writer
writer.setTable(tag, origData, origLength=origLength, origChecksum=origChecksum, compLength=compLength)
# write the metadata
metadata = None
metaOrigLength = None
metaLength = None
if hasattr(self, "metadata"):
declaration = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
tree = ElementTree.ElementTree(self.metadata)
f = StringIO()
tree.write(f, encoding="utf-8")
metadata = f.getvalue()
# make sure the metadata starts with the declaration
if not metadata.startswith(declaration):
metadata = declaration + metadata
del f
elif self.reader is not None:
if recompressTables:
metadata = self.reader.metadata
else:
metadata, metaOrigLength, metaLength = self.reader.getCompressedMetadata()
if metadata:
writer.setMetadata(metadata, metaOrigLength=metaOrigLength, metaLength=metaLength)
# write the private data
privData = self.privateData
if privData:
writer.setPrivateData(privData)
# close the writer
writer.close()
# close the file
if closeStream:
file.close()
def saveXML(self):
raise NotImplementedError
def importXML(self):
raise NotImplementedError
# ------
# Reader
# ------
woffHeaderFormat = """
> # big endian
signature: 4s
flavor: 4s
length: L
numTables: H
reserved: H
totalSFNTSize: L
majorVersion: H
minorVersion: H
metaOffset: L
metaLength: L
metaOrigLength: L
privOffset: L
privLength: L
"""
woffHeaderSize = sstruct.calcsize(woffHeaderFormat)
class WOFFReader(object):
def __init__(self, file, checkChecksums=1):
self.file = file
self.checkChecksums = checkChecksums
# unpack the header
self.file.seek(0)
bytes = self.file.read(woffHeaderSize)
if len(bytes) != woffHeaderSize:
raise WOFFLibError("Not a properly formatted WOFF file.")
sstruct.unpack(woffHeaderFormat, bytes, self)
if self.signature != "wOFF":
raise WOFFLibError("Not a properly formatted WOFF file.")
# unpack the directory
self.tables = {}
for i in range(self.numTables):
entry = WOFFDirectoryEntry()
entry.fromFile(self.file)
self.tables[entry.tag] = entry
def close(self):
self.file.close()
def __contains__(self, tag):
return tag in self.tables
has_key = __contains__
def keys(self):
"""
This returns a list of all tables in the WOFF
sorted in ascending order based on the offset
of each table.
"""
sorter = []
for tag, entry in self.tables.items():
sorter.append((entry.offset, tag))
order = [tag for offset, tag in sorted(sorter)]
return order
def __getitem__(self, tag):
entry = self.tables[tag]
self.file.seek(entry.offset)
data = self.file.read(entry.compLength)
# decompress if necessary
if entry.compLength < entry.origLength:
data = zlib.decompress(data)
else:
data = data[:entry.origLength]
# compare the checksums
if self.checkChecksums:
checksum = calcTableChecksum(tag, data)
if self.checkChecksums > 1:
assert checksum == entry.origChecksum, "bad checksum for '%s' table" % tag
elif checksum != entry.origChecksum:
print "bad checksum for '%s' table" % tag
print
return data
def getCompressedTableData(self, tag):
entry = self.tables[tag]
self.file.seek(entry.offset)
data = self.file.read(entry.compLength)
return data, entry.origLength, entry.origChecksum, entry.compLength
def getCompressedMetadata(self):
self.file.seek(self.metaOffset)
data = self.file.read(self.metaLength)
return data, self.metaOrigLength, self.metaLength
def __getattr__(self, attr):
if attr not in ("privateData", "metadata"):
raise AttributeError(attr)
if attr == "privateData":
self.file.seek(self.privOffset)
return self.file.read(self.privLength)
if attr == "metadata":
self.file.seek(self.metaOffset)
data = self.file.read(self.metaLength)
if self.metaLength:
data = zlib.decompress(data)
assert len(data) == self.metaOrigLength
return data
def __delitem__(self, tag):
del self.tables[tag]
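# Example (sketch): pulling raw table data with WOFFReader directly
# ("MyFont.woff" is a hypothetical path; the reader expects an open binary file).
#     reader = WOFFReader(open("MyFont.woff", "rb"))
#     for tag in reader.keys():
#         data = reader[tag]  # decompressed table data, checksum-checked when checkChecksums is set
#     reader.close()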
# ------
# Writer
# ------
class WOFFWriter(object):
def __init__(self, file, numTables, flavor="\000\001\000\000",
majorVersion=0, minorVersion=0, compressionLevel=9,
recalculateHeadChecksum=True,
verbose=False):
self.signature = "wOFF"
self.flavor = flavor
self.length = woffHeaderSize + (numTables * woffDirectoryEntrySize)
self.totalSFNTSize = sfntDirectorySize + (numTables * sfntDirectoryEntrySize)
self.numTables = numTables
self.majorVersion = majorVersion
self.minorVersion = minorVersion
self.metaOffset = 0
self.metaOrigLength = 0
self.metaLength = 0
self.privOffset = 0
self.privLength = 0
self.reserved = 0
self.file = file
self.compressionLevel = compressionLevel
self.recalculateHeadChecksum = recalculateHeadChecksum
self.verbose = verbose
# the data is held to facilitate the
# head checkSumAdjustment calculation.
self.tables = {}
self.metadata = None
self.privateData = None
self.tableDataEnd = 0
self.metadataEnd = 0
def _tableOrder(self):
return [entry.tag for index, entry, data in sorted(self.tables.values())]
def setTable(self, tag, data, origLength=None, origChecksum=None, compLength=None):
# don't compress the head if the checkSumAdjustment needs to be recalculated
# the compression will be handled later.
if self.recalculateHeadChecksum and tag == "head":
# decompress
if compLength is not None and compLength < origLength:
data = zlib.decompress(data)
entry = self._prepTable(tag, data, origLength=len(data), entryOnly=True)
# compress
else:
entry, data = self._prepTable(tag, data=data, origLength=origLength, origChecksum=origChecksum, compLength=compLength)
# store
self.tables[tag] = (len(self.tables), entry, data)
def setMetadata(self, data, metaOrigLength=None, metaLength=None):
if not data:
return
if metaLength is None:
if self.verbose:
debugmsg("compressing metadata")
metaOrigLength = len(data)
data = zlib.compress(data, self.compressionLevel)
metaLength = len(data)
# set the header values
self.metaOrigLength = metaOrigLength
self.metaLength = metaLength
# store
self.metadata = data
def setPrivateData(self, data):
if not data:
return
privLength = len(data)
# set the header value
self.privLength = privLength
# store
self.privateData = data
def close(self):
if self.numTables != len(self.tables):
raise WOFFLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(self.tables)))
# first, handle the checkSumAdjustment
if self.recalculateHeadChecksum and "head" in self.tables:
self._handleHeadChecksum()
# check the table directory conformance
for tag, (index, entry, data) in sorted(self.tables.items()):
self._checkTableConformance(entry, data)
# write the header
header = sstruct.pack(woffHeaderFormat, self)
self.file.seek(0)
self.file.write(header)
# update the directory offsets
offset = woffHeaderSize + (woffDirectoryEntrySize * self.numTables)
order = self._tableOrder()
for tag in order:
index, entry, data = self.tables[tag]
entry.offset = offset
offset += calc4BytePaddedLength(entry.compLength) # ensure byte alignment
# write the directory
self._writeTableDirectory()
# write the table data
self._writeTableData()
# write the metadata
self._writeMetadata()
# write the private data
self._writePrivateData()
# write the header
self._writeHeader()
# go to the beginning of the file
self.file.seek(0)
# header support
def _writeHeader(self):
header = sstruct.pack(woffHeaderFormat, self)
self.file.seek(0)
self.file.write(header)
# sfnt support
def _prepTable(self, tag, data, origLength=None, origChecksum=None, compLength=None, entryOnly=False):
# skip data prep
if entryOnly:
origLength = origLength
origChecksum = calcTableChecksum(tag, data)
compLength = 0
# prep the data
else:
# compress
if compLength is None:
origData = data
origLength = len(origData)
origChecksum = calcTableChecksum(tag, data)
if self.verbose:
debugmsg("compressing '%s' table" % tag)
compData = zlib.compress(origData, self.compressionLevel)
compLength = len(compData)
if origLength <= compLength:
data = origData
compLength = origLength
else:
data = compData
# make the directory entry
entry = WOFFDirectoryEntry()
entry.tag = tag
entry.offset = 0
entry.origLength = origLength
entry.origChecksum = origChecksum
entry.compLength = compLength
# return
if entryOnly:
return entry
return entry, data
def _checkTableConformance(self, entry, data):
"""
Check the conformance of the table directory entries.
These must be checked because the origChecksum, origLength
and compLength can be set by an outside caller.
"""
if self.verbose:
debugmsg("checking conformance of '%s' table" % entry.tag)
# origLength must be less than or equal to compLength
if entry.origLength < entry.compLength:
raise WOFFLibError("origLength and compLength are not correct in the '%s' table entry." % entry.tag)
# unpack the data as needed
if entry.origLength > entry.compLength:
origData = zlib.decompress(data)
compData = data
else:
origData = data
compData = data
# the origLength entry must match the actual length
if entry.origLength != len(origData):
raise WOFFLibError("origLength is not correct in the '%s' table entry." % entry.tag)
# the checksum must be correct
if entry.origChecksum != calcTableChecksum(entry.tag, origData):
raise WOFFLibError("origChecksum is not correct in the '%s' table entry." % entry.tag)
# the compLength must be correct
if entry.compLength != len(compData):
raise WOFFLibError("compLength is not correct in the '%s' table entry." % entry.tag)
def _handleHeadChecksum(self):
if self.verbose:
debugmsg("updating head checkSumAdjustment")
# get the value
tables = {}
offset = sfntDirectorySize + (sfntDirectoryEntrySize * self.numTables)
for (index, entry, data) in sorted(self.tables.values()):
tables[entry.tag] = dict(offset=offset, length=entry.origLength, checkSum=entry.origChecksum)
offset += calc4BytePaddedLength(entry.origLength)
checkSumAdjustment = calcHeadCheckSumAdjustment(self.flavor, tables)
# set the value in the head table
index, entry, data = self.tables["head"]
data = data[:8] + struct.pack(">L", checkSumAdjustment) + data[12:]
# compress the data
newEntry, data = self._prepTable("head", data)
# update the entry data
assert entry.origChecksum == newEntry.origChecksum
entry.origLength = newEntry.origLength
entry.compLength = newEntry.compLength
# store
self.tables["head"] = (index, entry, data)
def _writeTableDirectory(self):
if self.verbose:
debugmsg("writing table directory")
self.file.seek(woffHeaderSize)
for tag, (index, entry, data) in sorted(self.tables.items()):
entry = sstruct.pack(woffDirectoryEntryFormat, entry)
self.file.write(entry)
def _writeTableData(self):
        offset = woffHeaderSize + (woffDirectoryEntrySize * self.numTables)
self.file.seek(offset)
for tag in self._tableOrder():
if self.verbose:
debugmsg("writing '%s' table" % tag)
index, entry, data = self.tables[tag]
data += "\0" * (calc4BytePaddedLength(entry.compLength) - entry.compLength ) # ensure byte alignment
self.file.write(data)
self.length += calc4BytePaddedLength(entry.compLength) # ensure byte alignment
self.totalSFNTSize += calc4BytePaddedLength(entry.origLength) # ensure byte alignment
# store the end for use by metadata or private data
self.tableDataEnd = self.length
# metadata support
def _writeMetadata(self):
if self.metadata is None:
return
if self.verbose:
debugmsg("writing metadata")
self.length += self.metaLength
self.metaOffset = self.tableDataEnd
self.file.seek(self.metaOffset)
self.file.write(self.metadata)
# store the end for use by private data
self.metadataEnd = self.metaOffset + self.metaLength
# if private data exists, pad to a four byte boundary
if self.privateData is not None:
padding = calc4BytePaddedLength(self.metaLength) - self.metaLength
self.metadataEnd += padding
self.length += padding
padding = "\0" * padding
if padding:
self.file.write(padding)
# private data support
def _writePrivateData(self):
if self.privateData is None:
return
if self.verbose:
debugmsg("writing private data")
if self.metadata is not None:
self.privOffset = self.metadataEnd
else:
self.privOffset = self.tableDataEnd
self.length += self.privLength
self.file.seek(self.privOffset)
self.file.write(self.privateData)
# ---------
# Directory
# ---------
woffDirectoryEntryFormat = """
> # big endian
tag: 4s
offset: L
compLength: L
origLength: L
origChecksum: L
"""
woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat)
class WOFFDirectoryEntry(object):
def fromFile(self, file):
sstruct.unpack(woffDirectoryEntryFormat, file.read(woffDirectoryEntrySize), self)
def fromString(self, str):
sstruct.unpack(woffDirectoryEntryFormat, str, self)
def toString(self):
return sstruct.pack(woffDirectoryEntryFormat, self)
def __repr__(self):
if hasattr(self, "tag"):
return "<WOFFDirectoryEntry '%s' at %x>" % (self.tag, id(self))
else:
return "<WOFFDirectoryEntry at %x>" % id(self)
# -------
# Helpers
# -------
class WOFFLibError(Exception): pass
def calc4BytePaddedLength(length):
return (length + 3) & ~3
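# e.g. calc4BytePaddedLength(12) == 12 and calc4BytePaddedLength(13) == 16:
# (13 + 3) & ~3 rounds 13 up to the next multiple of four.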
def calcTableChecksum(tag, data):
if tag == "head":
checksum = calcChecksum(data[:8] + '\0\0\0\0' + data[12:])
else:
checksum = calcChecksum(data)
checksum = checksum & 0xffffffff
return checksum
def calcHeadCheckSumAdjustment(flavor, tables):
numTables = len(tables)
# build the sfnt header
searchRange, entrySelector, rangeShift = getSearchRange(numTables)
sfntDirectoryData = dict(
sfntVersion=flavor,
numTables=numTables,
searchRange=searchRange,
entrySelector=entrySelector,
rangeShift=rangeShift
)
# build the sfnt directory
directory = sstruct.pack(sfntDirectoryFormat, sfntDirectoryData)
for tag, entry in sorted(tables.items()):
entry = tables[tag]
sfntEntry = SFNTDirectoryEntry()
sfntEntry.tag = tag
sfntEntry.checkSum = entry["checkSum"]
sfntEntry.offset = entry["offset"]
sfntEntry.length = entry["length"]
directory += sfntEntry.toString()
# calculate the checkSumAdjustment
checkSums = [entry["checkSum"] for entry in tables.values()]
checkSums.append(calcChecksum(directory))
checkSumAdjustment = sum(checkSums)
checkSumAdjustment = (0xB1B0AFBA - checkSumAdjustment) & 0xffffffff
# done
return checkSumAdjustment
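# Editor's note: a minimal sketch of the `tables` mapping expected by
# calcHeadCheckSumAdjustment above; the tags and numbers are hypothetical.
#
#     tables = {
#         "cmap": dict(checkSum=0x12345678, offset=44, length=100),
#         "glyf": dict(checkSum=0x9ABCDEF0, offset=144, length=200),
#     }
#     adjustment = calcHeadCheckSumAdjustment("OTTO", tables)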
# ----------------
# SFNT Conformance
# ----------------
def checkSFNTConformance(file):
"""
	This function checks an SFNT file to see if it meets
	the conformance recommendations in the WOFF specification.
This includes:
- searchRange must be correct.
- entrySelector must be correct.
- rangeShift must be correct.
- offset to each table must be after the table directory
and before the end of the file.
- offset + length of each table must not extend past
the end of the file.
- the table directory must be in ascending order.
- tables must be padded to 4 byte boundaries.
- the final table must be padded to a 4 byte boundary.
- the gaps between table data blocks must not be more
than necessary to pad the table to a 4 byte boundary.
- the gap between the end of the final table and
the end of the file must not be more than necessary
to pad the table to a four byte boundary.
- the checksums for each table in the table directory
must be correct.
- the head checkSumAdjustment must be correct.
- the padding bytes must be null.
The returned value of this function will be a list.
If any errors were found, they will be represented
as strings in the list.
"""
# load the data
closeFile = False
if not hasattr(file, "read"):
file = open(file, "rb")
closeFile = True
data = file.read()
if closeFile:
file.close()
# storage
errors = []
# unpack the header
headerData = data[:sfntDirectorySize]
header = sstruct.unpack(sfntDirectoryFormat, headerData)
# unpack the table directory
numTables = header["numTables"]
directoryData = data[sfntDirectorySize : sfntDirectorySize + (sfntDirectoryEntrySize * numTables)]
tableDirectory = []
for index in range(numTables):
entry = sstruct.unpack(sfntDirectoryEntryFormat, directoryData[:sfntDirectoryEntrySize])
tableDirectory.append(entry)
directoryData = directoryData[sfntDirectoryEntrySize:]
# sanity testing
errors += _testOffsetBoundaryValidity(len(data), tableDirectory)
errors += _testLengthBoundaryValidity(len(data), tableDirectory)
# if one or more errors have already been found, something
# is very wrong and this should come to a screeching halt.
if errors:
return errors
# junk at the beginning of the file
errors += _testJunkAtTheBeginningOfTheFile(header)
# test directory order
errors += _testDirectoryOrder(tableDirectory)
# load the table data
for entry in tableDirectory:
offset = entry["offset"]
length = entry["length"]
entry["data"] = data[offset:offset+length]
# test for overlaps
errors += _testOverlaps(tableDirectory)
# test for padding
errors += _testOffsets(tableDirectory)
# test the final table padding
errors += _testFinalTablePadding(len(data), numTables, tableDirectory[-1]["tag"])
# test for gaps
errors += _testGaps(tableDirectory)
# test for a gap at the end of the file
errors += _testGapAfterFinalTable(len(data), tableDirectory)
# test padding value
errors += _testPaddingValue(tableDirectory, data)
# validate checksums
errors += _testCheckSums(tableDirectory)
errors += _testHeadCheckSum(header, tableDirectory)
# done.
return errors
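# Editor's note: a minimal usage sketch for checkSFNTConformance; the font
# path is hypothetical. The function accepts a path or a binary file-like
# object and returns a list of error strings (empty when the file conforms).
#
#     errors = checkSFNTConformance("MyFont.ttf")
#     for error in errors:
#         print error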
def _testOffsetBoundaryValidity(dataLength, tableDirectory):
"""
>>> test = [
... dict(tag="test", offset=44)
... ]
>>> bool(_testOffsetBoundaryValidity(45, test))
False
>>> test = [
... dict(tag="test", offset=1)
... ]
>>> bool(_testOffsetBoundaryValidity(45, test))
True
>>> test = [
... dict(tag="test", offset=46)
... ]
>>> bool(_testOffsetBoundaryValidity(45, test))
True
"""
errors = []
numTables = len(tableDirectory)
minOffset = sfntDirectorySize + (sfntDirectoryEntrySize * numTables)
for entry in tableDirectory:
offset = entry["offset"]
tag = entry["tag"]
if offset < minOffset:
errors.append("The offset to the %s table is not valid." % tag)
if offset > dataLength:
errors.append("The offset to the %s table is not valid." % tag)
return errors
def _testLengthBoundaryValidity(dataLength, tableDirectory):
"""
>>> test = [
... dict(tag="test", offset=44, length=1)
... ]
>>> bool(_testLengthBoundaryValidity(45, test))
False
>>> test = [
... dict(tag="test", offset=44, length=2)
... ]
>>> bool(_testLengthBoundaryValidity(45, test))
True
"""
errors = []
entries = [(entry["offset"], entry) for entry in tableDirectory]
for o, entry in sorted(entries):
offset = entry["offset"]
length = entry["length"]
tag = entry["tag"]
end = offset + length
if end > dataLength:
errors.append("The length of the %s table is not valid." % tag)
return errors
def _testJunkAtTheBeginningOfTheFile(header):
"""
>>> test = dict(numTables=5, searchRange=64, entrySelector=2, rangeShift=16)
>>> bool(_testJunkAtTheBeginningOfTheFile(test))
False
>>> test = dict(numTables=5, searchRange=0, entrySelector=2, rangeShift=16)
>>> bool(_testJunkAtTheBeginningOfTheFile(test))
True
>>> test = dict(numTables=5, searchRange=64, entrySelector=0, rangeShift=16)
>>> bool(_testJunkAtTheBeginningOfTheFile(test))
True
>>> test = dict(numTables=5, searchRange=64, entrySelector=2, rangeShift=0)
>>> bool(_testJunkAtTheBeginningOfTheFile(test))
True
"""
errors = []
numTables = header["numTables"]
searchRange, entrySelector, rangeShift = getSearchRange(numTables)
if header["searchRange"] != searchRange:
errors.append("The searchRange value is incorrect.")
if header["entrySelector"] != entrySelector:
errors.append("The entrySelector value is incorrect.")
if header["rangeShift"] != rangeShift:
errors.append("The rangeShift value is incorrect.")
return errors
def _testDirectoryOrder(tableDirectory):
"""
>>> test = [
... dict(tag="aaaa"),
... dict(tag="bbbb")
... ]
>>> bool(_testDirectoryOrder(test))
False
>>> test = [
... dict(tag="bbbb"),
... dict(tag="aaaa")
... ]
>>> bool(_testDirectoryOrder(test))
True
"""
order = [entry["tag"] for entry in tableDirectory]
if order != list(sorted(order)):
return ["The table directory is not in ascending order."]
return []
def _testOverlaps(tableDirectory):
"""
>>> test = [
... dict(tag="aaaa", offset=0, length=100),
... dict(tag="bbbb", offset=1000, length=100),
... ]
>>> bool(_testOverlaps(test))
False
>>> test = [
... dict(tag="aaaa", offset=0, length=100),
... dict(tag="bbbb", offset=50, length=100),
... ]
>>> bool(_testOverlaps(test))
True
>>> test = [
... dict(tag="aaaa", offset=0, length=100),
... dict(tag="bbbb", offset=0, length=100),
... ]
>>> bool(_testOverlaps(test))
True
>>> test = [
... dict(tag="aaaa", offset=0, length=100),
... dict(tag="bbbb", offset=0, length=150),
... ]
>>> bool(_testOverlaps(test))
True
"""
# gather the edges
edges = {}
for entry in tableDirectory:
start = entry["offset"]
end = start + entry["length"]
edges[entry["tag"]] = (start, end)
# look for overlaps
overlaps = set()
for tag, (start, end) in edges.items():
for otherTag, (otherStart, otherEnd) in edges.items():
tag = tag.strip()
otherTag = otherTag.strip()
if tag == otherTag:
continue
if start >= otherStart and start < otherEnd:
l = sorted((tag, otherTag))
overlaps.add(tuple(l))
if end > otherStart and end <= otherEnd:
l = sorted((tag, otherTag))
overlaps.add(tuple(l))
# report
errors = []
if overlaps:
for t1, t2 in sorted(overlaps):
errors.append("The tables %s and %s overlap." % (t1, t2))
return errors
def _testOffsets(tableDirectory):
"""
>>> test = [
... dict(tag="test", offset=1)
... ]
>>> bool(_testOffsets(test))
True
>>> test = [
... dict(tag="test", offset=2)
... ]
>>> bool(_testOffsets(test))
True
>>> test = [
... dict(tag="test", offset=3)
... ]
>>> bool(_testOffsets(test))
True
>>> test = [
... dict(tag="test", offset=4)
... ]
>>> bool(_testOffsets(test))
False
"""
errors = []
# make the entries sortable
entries = [(entry["offset"], entry) for entry in tableDirectory]
for o, entry in sorted(entries):
offset = entry["offset"]
if offset % 4:
errors.append("The %s table does not begin on a 4-byte boundary." % entry["tag"].strip())
return errors
def _testFinalTablePadding(dataLength, numTables, finalTableTag):
"""
>>> bool(_testFinalTablePadding(
... sfntDirectorySize + sfntDirectoryEntrySize + 1,
... 1,
... "test"
... ))
True
>>> bool(_testFinalTablePadding(
... sfntDirectorySize + sfntDirectoryEntrySize + 2,
... 1,
... "test"
... ))
True
>>> bool(_testFinalTablePadding(
... sfntDirectorySize + sfntDirectoryEntrySize + 3,
... 1,
... "test"
... ))
True
>>> bool(_testFinalTablePadding(
... sfntDirectorySize + sfntDirectoryEntrySize + 4,
... 1,
... "test"
... ))
False
"""
errors = []
if (dataLength - (sfntDirectorySize + (sfntDirectoryEntrySize * numTables))) % 4:
errors.append("The final table (%s) is not properly padded." % finalTableTag)
return errors
def _testGaps(tableDirectory):
"""
>>> start = sfntDirectorySize + (sfntDirectoryEntrySize * 2)
>>> test = [
... dict(offset=start, length=4, tag="test1"),
... dict(offset=start+4, length=4, tag="test2"),
... ]
>>> bool(_testGaps(test))
False
>>> test = [
... dict(offset=start, length=4, tag="test1"),
... dict(offset=start+5, length=4, tag="test2"),
... ]
>>> bool(_testGaps(test))
True
>>> test = [
... dict(offset=start, length=4, tag="test1"),
... dict(offset=start+8, length=4, tag="test2"),
... ]
>>> bool(_testGaps(test))
True
"""
errors = []
sorter = []
for entry in tableDirectory:
sorter.append((entry["offset"], entry))
prevTag = None
prevEnd = None
for offset, entry in sorted(sorter):
length = entry["length"]
length = calc4BytePaddedLength(length)
tag = entry["tag"]
if prevEnd is None:
prevEnd = offset + length
prevTag = tag
else:
if offset - prevEnd != 0:
errors.append("Improper padding between the %s and %s tables." % (prevTag, tag))
prevEnd = offset + length
prevTag = tag
return errors
def _testGapAfterFinalTable(dataLength, tableDirectory):
"""
>>> start = sfntDirectorySize + (sfntDirectoryEntrySize * 2)
>>> test = [
... dict(offset=start, length=1, tag="test")
... ]
>>> bool(_testGapAfterFinalTable(start + 4, test))
False
>>> test = [
... dict(offset=start, length=1, tag="test")
... ]
>>> bool(_testGapAfterFinalTable(start + 5, test))
True
>>> test = [
... dict(offset=start, length=1, tag="test")
... ]
>>> bool(_testGapAfterFinalTable(start + 8, test))
True
"""
errors = []
sorter = []
for entry in tableDirectory:
sorter.append((entry["offset"], entry))
	entry = sorted(sorter)[-1][1]
	offset = entry["offset"]
	length = entry["length"]
length = calc4BytePaddedLength(length)
lastPosition = offset + length
if dataLength - lastPosition > 0:
errors.append("Improper padding at the end of the file.")
return errors
def _testCheckSums(tableDirectory):
"""
>>> data = "0" * 44
>>> checkSum = calcTableChecksum("test", data)
>>> test = [
... dict(data=data, checkSum=checkSum, tag="test")
... ]
>>> bool(_testCheckSums(test))
False
>>> test = [
... dict(data=data, checkSum=checkSum+1, tag="test")
... ]
>>> bool(_testCheckSums(test))
True
"""
errors = []
for entry in tableDirectory:
tag = entry["tag"]
checkSum = entry["checkSum"]
data = entry["data"]
shouldBe = calcTableChecksum(tag, data)
if checkSum != shouldBe:
errors.append("Invalid checksum for the %s table." % tag)
return errors
def _testHeadCheckSum(header, tableDirectory):
"""
>>> header = dict(sfntVersion="OTTO")
>>> tableDirectory = [
... dict(tag="head", offset=100, length=100, checkSum=123, data="00000000"+struct.pack(">L", 925903070)),
... dict(tag="aaab", offset=200, length=100, checkSum=456),
... dict(tag="aaac", offset=300, length=100, checkSum=789),
... ]
>>> bool(_testHeadCheckSum(header, tableDirectory))
"""
flavor = header["sfntVersion"]
tables = {}
for entry in tableDirectory:
tables[entry["tag"]] = entry
data = tables["head"]["data"][8:12]
checkSumAdjustment = struct.unpack(">L", data)[0]
shouldBe = calcHeadCheckSumAdjustment(flavor, tables)
if checkSumAdjustment != shouldBe:
return ["The head checkSumAdjustment value is incorrect."]
return []
def _testPaddingValue(tableDirectory, data):
"""
# before first table
>>> testDirectory = [dict(tag="aaaa", offset=28, length=4)]
>>> bool(_testPaddingValue(testDirectory, "\x01" * 32))
False
>>> testDirectory = [dict(tag="aaaa", offset=32, length=4)]
>>> bool(_testPaddingValue(testDirectory, "\x01" * 36))
True
# between tables
>>> testDirectory = [dict(tag="aaaa", offset=44, length=4), dict(tag="bbbb", offset=48, length=4)]
>>> bool(_testPaddingValue(testDirectory, "\x01" * 52))
False
>>> testDirectory = [dict(tag="aaaa", offset=44, length=4), dict(tag="bbbb", offset=52, length=4)]
>>> bool(_testPaddingValue(testDirectory, "\x01" * 56))
True
# after final table
>>> testDirectory = [dict(tag="aaaa", offset=28, length=4)]
>>> bool(_testPaddingValue(testDirectory, "\x01" * 32))
False
>>> testDirectory = [dict(tag="aaaa", offset=28, length=4)]
>>> bool(_testPaddingValue(testDirectory, "\x01" * 36))
True
"""
errors = []
# check between directory and first table
# check between all tables
entries = [(entry["offset"], entry) for entry in tableDirectory]
prev = "table directory"
prevEnd = sfntDirectorySize + (sfntDirectoryEntrySize * len(tableDirectory))
for o, entry in sorted(entries):
tag = entry["tag"]
offset = entry["offset"]
length = entry["length"]
# slice the bytes between the previous and the current
if offset > prevEnd:
bytes = data[prevEnd:offset]
# replace \0 with nothing
bytes = bytes.replace("\0", "")
if bytes:
errors.append("Bytes between %s and %s are not null." % (prev, tag))
	# shift for the next table
prev = tag
prevEnd = offset + length
# check last table
entry = sorted(entries)[-1][1]
end = entry["offset"] + entry["length"]
bytes = data[end:]
bytes = bytes.replace("\0", "")
if bytes:
errors.append("Bytes after final table (%s) are not null." % entry["tag"])
return errors
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=False)
|
darmaa/odoo
|
refs/heads/master
|
addons/crm_partner_assign/crm_lead.py
|
44
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class crm_lead(osv.osv):
_inherit = 'crm.lead'
def get_interested_action(self, cr, uid, interested, context=None):
try:
model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'crm_lead_channel_interested_act')
except ValueError:
raise osv.except_osv(_('Error!'), _("The CRM Channel Interested Action is missing"))
action = self.pool[model].read(cr, uid, action_id, context=context)
action_context = eval(action['context'])
action_context['interested'] = interested
action['context'] = str(action_context)
return action
def case_interested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, True, context=context)
def case_disinterested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, False, context=context)
def assign_salesman_of_assigned_partner(self, cr, uid, ids, context=None):
salesmans_leads = {}
for lead in self.browse(cr, uid, ids, context=context):
if (lead.stage_id.probability > 0 and lead.stage_id.probability < 100) or lead.stage_id.sequence == 1:
if lead.partner_assigned_id and lead.partner_assigned_id.user_id and lead.partner_assigned_id.user_id != lead.user_id:
salesman_id = lead.partner_assigned_id.user_id.id
if salesmans_leads.get(salesman_id):
salesmans_leads[salesman_id].append(lead.id)
else:
salesmans_leads[salesman_id] = [lead.id]
for salesman_id, lead_ids in salesmans_leads.items():
salesteam_id = self.on_change_user(cr, uid, lead_ids, salesman_id, context=None)['value'].get('section_id')
self.write(cr, uid, lead_ids, {'user_id': salesman_id, 'section_id': salesteam_id}, context=context)
|
googleapis/python-compute
|
refs/heads/master
|
google/cloud/compute_v1/services/node_groups/__init__.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import NodeGroupsClient
__all__ = ("NodeGroupsClient",)
|
huijunwu/heron
|
refs/heads/master
|
heron/shell/src/python/handlers/pmaphandler.py
|
5
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' pmaphandler.py '''
import json
import tornado.web
from heron.shell.src.python import utils
class PmapHandler(tornado.web.RequestHandler):
"""
Responsible for reporting memory map of a process given its pid.
"""
# pylint: disable=attribute-defined-outside-init
@tornado.web.asynchronous
def get(self, pid):
''' get method '''
body = utils.str_cmd(['pmap', '-pXX', pid], None, None)
self.content_type = 'application/json'
self.write(json.dumps(body))
self.finish()
|
Phuehvk/gyp
|
refs/heads/master
|
test/defines/gyptest-defines-env.py
|
501
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies build of an executable with C++ define specified by a gyp define.
"""
import os
import TestGyp
test = TestGyp.TestGyp()
# With the value only given in environment, it should be used.
try:
os.environ['GYP_DEFINES'] = 'value=10'
test.run_gyp('defines-env.gyp')
finally:
del os.environ['GYP_DEFINES']
test.build('defines-env.gyp')
expect = """\
VALUE is 10
"""
test.run_built_executable('defines', stdout=expect)
# With the value given in both command line and environment,
# command line should take precedence.
try:
os.environ['GYP_DEFINES'] = 'value=20'
test.run_gyp('defines-env.gyp', '-Dvalue=25')
finally:
del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines.c')
test.build('defines-env.gyp')
expect = """\
VALUE is 25
"""
test.run_built_executable('defines', stdout=expect)
# With the value only given in environment, it should be ignored if
# --ignore-environment is specified.
try:
os.environ['GYP_DEFINES'] = 'value=30'
test.run_gyp('defines-env.gyp', '--ignore-environment')
finally:
del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines.c')
test.build('defines-env.gyp')
expect = """\
VALUE is 5
"""
test.run_built_executable('defines', stdout=expect)
# With the value given in both command line and environment, and
# --ignore-environment also specified, command line should still be used.
try:
os.environ['GYP_DEFINES'] = 'value=40'
test.run_gyp('defines-env.gyp', '--ignore-environment', '-Dvalue=45')
finally:
del os.environ['GYP_DEFINES']
test.sleep()
test.touch('defines.c')
test.build('defines-env.gyp')
expect = """\
VALUE is 45
"""
test.run_built_executable('defines', stdout=expect)
test.pass_test()
|
Azulinho/ansible
|
refs/heads/devel
|
lib/ansible/modules/system/facter.py
|
125
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: facter
short_description: Runs the discovery program I(facter) on the remote system
description:
- Runs the I(facter) discovery program
(U(https://github.com/puppetlabs/facter)) on the remote system, returning
JSON data that can be useful for inventory purposes.
version_added: "0.2"
requirements:
- facter
- ruby-json
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = '''
# Example command-line invocation
ansible www.example.net -m facter
'''
import json
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec=dict()
)
facter_path = module.get_bin_path('facter', opt_dirs=['/opt/puppetlabs/bin'])
cmd = [facter_path, "--json"]
rc, out, err = module.run_command(cmd, check_rc=True)
module.exit_json(**json.loads(out))
if __name__ == '__main__':
main()
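# Editor's note: an illustrative playbook usage; the task and variable names
# below are assumptions, not part of this module.
#
#   - name: Collect facter facts
#     facter:
#     register: facter_out
#
#   - debug:
#       var: facter_out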
|
SydOps/awscli.sh
|
refs/heads/master
|
python.boto/spl46/highlow.py
|
1
|
#!/usr/bin/env python
import random, time, os, sys
import boto.ec2.cloudwatch
# Connect to CloudWatch
ec2_region = os.environ.get('EC2_REGION')
connection = boto.ec2.cloudwatch.connect_to_region(ec2_region)
# Let them guess
count = 0
while True:
# Start of game?
if count == 0:
start_time = time.time()
num = random.randint(1, 100)
print "I'm thinking of a number from 1 to 100. Try to guess it! (Enter 0 to exit)"
# Guess a number
guess = input("> ")
count += 1
# Respond
if guess == 0:
# End game
sys.exit()
elif guess < num:
print "Too low!"
elif guess > num:
print "Too high!"
else:
# Correct answer
seconds = int(time.time() - start_time)
print "That's correct! It took you %d guesses and %d seconds.\n" % (count, seconds)
# Push metric to CloudWatch
connection.put_metric_data(namespace="Lab", name="highlow", value=seconds)
print "The metric has been sent to CloudWatch.\n"
# Start again
count = 0
|
thumbimigwe/echorizr
|
refs/heads/master
|
lib/python2.7/site-packages/django/contrib/postgres/signals.py
|
548
|
from psycopg2 import ProgrammingError
from psycopg2.extras import register_hstore
from django.utils import six
def register_hstore_handler(connection, **kwargs):
if connection.vendor != 'postgresql':
return
try:
if six.PY2:
register_hstore(connection.connection, globally=True, unicode=True)
else:
register_hstore(connection.connection, globally=True)
except ProgrammingError:
# Hstore is not available on the database.
#
# If someone tries to create an hstore field it will error there.
# This is necessary as someone may be using PSQL without extensions
# installed but be using other features of contrib.postgres.
#
# This is also needed in order to create the connection in order to
# install the hstore extension.
pass
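# Editor's note: a minimal sketch of how this handler is typically wired up;
# the signal connection shown below is an assumption, not part of this module.
#
#     from django.db.backends.signals import connection_created
#     connection_created.connect(register_hstore_handler)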
|
xiaoyaozi5566/DynamicCache
|
refs/heads/master
|
src/python/m5/objects/__init__.py
|
87
|
# Copyright (c) 2010 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.internal import params
from m5.SimObject import *
try:
modules = __loader__.modules
except NameError:
modules = { }
for module in modules.iterkeys():
if module.startswith('m5.objects.'):
exec "from %s import *" % module
|
photoninger/ansible
|
refs/heads/devel
|
lib/ansible/modules/windows/win_reboot.py
|
45
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_reboot
short_description: Reboot a windows machine
description:
- Reboot a Windows machine, wait for it to go down, come back up, and respond to commands.
version_added: "2.1"
options:
pre_reboot_delay:
description:
- Seconds for shutdown to wait before requesting reboot
default: 2
aliases: [ pre_reboot_delay_sec ]
post_reboot_delay:
description:
- Seconds to wait after the reboot was successful and the connection was re-established
    - This is useful if you want to wait for something to settle despite your connection already working
default: 0
version_added: '2.4'
aliases: [ post_reboot_delay_sec ]
shutdown_timeout:
description:
- Maximum seconds to wait for shutdown to occur
- Increase this timeout for very slow hardware, large update applications, etc
- This option has been removed since Ansible 2.5 as the win_reboot behavior has changed
default: 600
aliases: [ shutdown_timeout_sec ]
reboot_timeout:
description:
- Maximum seconds to wait for machine to re-appear on the network and respond to a test command
- This timeout is evaluated separately for both network appearance and test command success (so maximum clock time is actually twice this value)
default: 600
aliases: [ reboot_timeout_sec ]
connect_timeout:
description:
- Maximum seconds to wait for a single successful TCP connection to the WinRM endpoint before trying again
default: 5
aliases: [ connect_timeout_sec ]
test_command:
description:
    - Command to run on the target; the machine is considered ready for management once it succeeds
default: whoami
msg:
description:
- Message to display to users
default: Reboot initiated by Ansible
notes:
- If a shutdown was already scheduled on the system, C(win_reboot) will abort the scheduled shutdown and enforce its own shutdown.
author:
- Matt Davis (@nitzmahone)
'''
EXAMPLES = r'''
# Unconditionally reboot the machine with all defaults
- win_reboot:
# Apply updates and reboot if necessary
- win_updates:
register: update_result
- win_reboot:
when: update_result.reboot_required
# Reboot a slow machine that might have lots of updates to apply
- win_reboot:
reboot_timeout: 3600
'''
RETURN = r'''
rebooted:
description: true if the machine was rebooted
returned: always
type: boolean
sample: true
elapsed:
description: The number of seconds that elapsed waiting for the system to be rebooted.
returned: always
type: int
sample: 23
'''
|
tiagovaz/qlive
|
refs/heads/master
|
Resources/AudioServer.py
|
1
|
import os
import time
from pyo64 import *
from constants import *
from fxbox_def import *
import QLiveLib
class SoundFilePlayer:
def __init__(self, id, filename):
self.id = id
self.filename = filename
sndfolder = os.path.join(QLiveLib.getVar("projectFolder"), "sounds")
path = os.path.join(sndfolder, self.filename)
self.table = SndTable(path)
self.gain = SigTo(0, time=0.02, init=0)
self.looper = Looper(self.table, mul=self.gain).stop()
self.directout = False
self.mixerInputId = -1
def setAttributes(self, dict):
self.looper.mode = dict[ID_COL_LOOPMODE]
self.looper.pitch = dict[ID_COL_TRANSPO]
self.gain.value = pow(10, dict[ID_COL_GAIN] * 0.05)
self.looper.start = dict[ID_COL_STARTPOINT]
self.looper.dur = dict[ID_COL_ENDPOINT] - dict[ID_COL_STARTPOINT]
self.looper.xfade = dict[ID_COL_CROSSFADE]
if dict[ID_COL_PLAYING] == 1:
self.looper.reset()
self.looper.play()
audioMixer = QLiveLib.getVar("AudioMixer")
if dict[ID_COL_DIRECTOUT] and not self.directout:
self.directout = True
for i in range(len(self.looper)):
chnl = (i + dict[ID_COL_CHANNEL]) % NUM_CHNLS
self.mixerInputId = audioMixer.addToMixer(chnl, self.looper[i])
elif not dict[ID_COL_DIRECTOUT] and self.directout:
self.directout = False
audioMixer.delFromMixer(self.mixerInputId)
elif dict[ID_COL_PLAYING] == 0:
self.looper.stop()
def setAttribute(self, id, value):
if id == ID_COL_LOOPMODE:
self.looper.mode = value
elif id == ID_COL_TRANSPO:
self.looper.pitch = value
elif id == ID_COL_GAIN:
self.gain.value = pow(10, value * 0.05)
elif id == ID_COL_STARTPOINT:
self.looper.start = value
elif id == ID_COL_ENDPOINT:
self.looper.dur = value - self.looper.start
elif id == ID_COL_CROSSFADE:
self.looper.xfade = value
elif id == ID_COL_PLAYING:
if value == "Play":
self.looper.play()
elif value == "Stop":
self.looper.stop()
# handle ID_COL_DIRECTOUT and ID_COL_CHANNEL
class BaseAudioObject:
def __init__(self, chnls, ctrls, values, interps):
self.chnls = chnls
for i, ctrl in enumerate(ctrls):
name = ctrl[0]
if values is None:
val = ctrl[1]
else:
val = values[i]
if interps is None:
inter = 0.01
else:
inter = interps[i]
if name == "gain":
val = pow(10, val * 0.05)
setattr(self, name, SigTo(val, time=inter, init=val))
self.input = Sig([0] * self.chnls)
def setInput(self, sig):
self.input.value = sig
def getOutput(self):
return self.output
def setEnable(self, x):
self.output.value = [self.input, self.process][x]
class AudioNone(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.output = Sig(self.input)
def setEnable(self, x):
self.output.value = [[0.0] * self.chnls, self.input][x]
class AudioIn(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.output = Sig(self.input, mul=self.gain)
def setEnable(self, x):
self.output.value = [[0.0] * self.chnls, self.input][x]
class FxLowpass(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.filter = Biquad(self.input, freq=self.freq, q=self.Q, mul=self.gain)
self.process = Interp(self.input, self.filter, self.dryWet)
self.output = Sig(self.process)
class FxHighpass(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.filter = Biquad(self.input, freq=self.freq, q=self.Q, type=1, mul=self.gain)
self.process = Interp(self.input, self.filter, self.dryWet)
self.output = Sig(self.process)
class FxFreeverb(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.reverb = Freeverb(self.input, self.size, self.damp, 1, mul=self.gain)
self.process = Interp(self.input, self.reverb, self.dryWet)
self.output = Sig(self.process)
class FxStereoVerb(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.reverb = STRev(self.input, self.pan, self.revtime, self.cutoff, 1, mul=self.gain)
self.process = Interp(self.input, self.reverb, self.dryWet)
self.output = Sig(self.process)
class FxDisto(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.disto = Disto(self.input, self.drive, self.slope, mul=self.gain)
self.process = Interp(self.input, self.disto, self.dryWet)
self.output = Sig(self.process)
class FxDelay(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.delay = Delay(self.input, self.deltime, self.feed, 5, mul=self.gain)
self.process = Interp(self.input, self.delay, self.dryWet)
self.output = Sig(self.process)
class FxCompressor(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.comp = Compress(self.input, self.thresh, self.ratio, self.attack,
self.decay, 5, knee=0.5, mul=self.gain)
self.process = Interp(self.input, self.comp, self.dryWet)
self.output = Sig(self.process)
class FxFreqShift(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.shifter = FreqShift(self.input, self.shift, mul=self.gain)
self.process = Interp(self.input, self.shifter, self.dryWet)
self.output = Sig(self.process)
class FxHarmonizer(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.harmon = Harmonizer(self.input, self.transpo, self.feed, mul=self.gain)
self.process = Interp(self.input, self.harmon, self.dryWet)
self.output = Sig(self.process)
class FxPanning(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.panning = Pan(self.input, self.chnls, self.pan, self.spread, mul=self.gain)
self.process = Interp(self.input, self.panning)
self.output = Sig(self.process)
class FxAudioOut(BaseAudioObject):
def __init__(self, chnls, ctrls, values, interps):
BaseAudioObject.__init__(self, chnls, ctrls, values, interps)
self.process = self.input
self.output = Sig(self.process, mul=self.gain)
AUDIO_OBJECTS = {"None": AudioNone, "AudioIn": AudioIn, "Lowpass": FxLowpass,
"Highpass": FxHighpass, "Freeverb": FxFreeverb,
"StereoVerb": FxStereoVerb, "Disto": FxDisto, "Delay": FxDelay,
"Compressor": FxCompressor, "FreqShift": FxFreqShift,
"Harmonizer": FxHarmonizer, "Panning": FxPanning, "AudioOut": FxAudioOut}
class AudioServer:
def __init__(self):
self.server = Server(buffersize=64)
self.server.setMidiInputDevice(99)
self.server.boot()
self.soundfiles = []
self.audioObjects = []
self.recording = False
self.fade = Fader(fadein=1, fadeout=1, dur=0, mul=0.3)
self.cueMidiLearn = CueMidiLearn(self.cueMidiLearnCallback)
self.cueMidiLearnState = None
self.cueMidiNotes = {}
self.cueMidiNotein = Notein(poly=1)
self.cueMidiCall = TrigFunc(self.cueMidiNotein["trigon"], self.cueMidiNoteCallback)
def getSaveState(self):
return {"cueMidiNotes": self.cueMidiNotes}
def setSaveState(self, state):
if state:
self.cueMidiNotes = state["cueMidiNotes"]
for val, state in self.cueMidiNotes.items():
if state in ["up", "down"]:
QLiveLib.getVar("ControlPanel").setButtonTooltip(state,
"Midi key: %d" % val)
def createSoundFilePlayers(self):
objs = QLiveLib.getVar("Soundfiles").getSoundFileObjects()
for obj in objs:
id = obj.getId()
filename = obj.getFilename()
player = SoundFilePlayer(id, filename)
player.setAttributes(obj.getAttributes())
self.soundfiles.append(player)
obj.setPlayerRef(player)
def createBoxObjects(self):
tracks = QLiveLib.getVar("FxTracks").getTracks()
for track in tracks:
chnls = 1
for but in track.getButtonInputs():
name = but.name
if not name: name = "None"
if name == "AudioIn":
inchnls = but.getInChannels()
numchnls = inchnls.count(1)
ismulti = but.getIsMultiChannels()
if ismulti:
chnls = max(chnls, numchnls)
else:
chnls = 1
ctrls = INPUT_DICT[name]["ctrls"]
values = but.getCurrentValues()
if values is not None:
obj = AUDIO_OBJECTS[name](chnls, ctrls, values,
but.getCurrentInterps())
but.setAudioRef(obj)
self.audioObjects.append(obj)
for but in track.getButtonFxs():
name = but.name
if not name: name = "None"
ctrls = FX_DICT[name]["ctrls"]
values = but.getCurrentValues()
if values is not None:
obj = AUDIO_OBJECTS[name](chnls, ctrls, values,
but.getCurrentInterps())
but.setAudioRef(obj)
self.audioObjects.append(obj)
def resetPlayerRefs(self):
objs = QLiveLib.getVar("Soundfiles").getSoundFileObjects()
for obj in objs:
obj.setPlayerRef(None)
def resetObjectRefs(self):
tracks = QLiveLib.getVar("FxTracks").getTracks()
for track in tracks:
for but in track.getButtonInputs():
but.setAudioRef(None)
for but in track.getButtonFxs():
but.setAudioRef(None)
def start(self, state):
if state:
QLiveLib.getVar("AudioMixer").resetMixer()
self.createSoundFilePlayers()
self.createBoxObjects()
QLiveLib.getVar("FxTracks").start()
self.server.start()
else:
if self.recording:
self.recording = False
self.recStop()
self.stop()
self.resetPlayerRefs()
self.resetObjectRefs()
self.soundfiles = []
self.audioObjects = []
QLiveLib.getVar("CuesPanel").onSaveCue()
def record(self, state):
if state:
self.recording = True
self.recStart()
self.start(True)
else:
self.recording = False
self.recStop()
self.start(False)
def stop(self):
self.server.setAmp(0)
time.sleep(.1)
self.server.stop()
def shutdown(self):
self.server.shutdown()
def isStarted(self):
return self.server.getIsStarted()
def isBooted(self):
return self.server.getIsBooted()
def recStart(self, filename="", fileformat=0, sampletype=0):
self.server.recordOptions(fileformat=fileformat, sampletype=sampletype)
if not filename:
filename = os.path.basename(QLiveLib.getVar("currentProject"))
filename, ext = os.path.splitext(filename)
filename = os.path.join(QLiveLib.getVar("projectFolder"), "bounce", filename)
if fileformat >= 0 and fileformat < 8:
ext = RECORD_EXTENSIONS[fileformat]
else:
ext = ".wav"
date = time.strftime('_%d_%b_%Y_%Hh%M')
complete_filename = QLiveLib.toSysEncoding(filename+date+ext)
self.server.recstart(complete_filename)
def recStop(self):
self.server.recstop()
def cueMidiNoteCallback(self):
if not self.cueMidiLearn.isStarted():
if self.cueMidiNotes:
pit = self.cueMidiNotein.get("pitch")
if pit in self.cueMidiNotes:
QLiveLib.getVar("ControlPanel").moveCueFromMidi(self.cueMidiNotes[pit])
def setCueMidiLearnState(self, which):
self.cueMidiLearnState = which
def startCueMidiLearn(self):
self.cueMidiLearn.scan()
def stopCueMidiLearn(self):
self.cueMidiLearn.stop()
def cueMidiLearnCallback(self, val, ctl=False):
if ctl:
self.cueMidiCtls[val] = self.cueMidiLearnState
else:
self.cueMidiNotes[val] = self.cueMidiLearnState
QLiveLib.getVar("ControlPanel").setButtonTooltip(self.cueMidiLearnState,
"Midi key: %d" % val)
self.cueMidiLearnState = None
QLiveLib.getVar("ControlPanel").resetCueButtonBackgroundColour()
class MidiLearn:
def __init__(self, callback):
self.callback = callback
self.scanner = CtlScan2(self.scanned, True).stop()
def scan(self):
self.scanner.reset()
self.scanner.play()
def stop(self):
self.scanner.stop()
def scanned(self, ctlnum, midichnl):
self.callback(ctlnum, midichnl)
self.scanner.stop()
class CueMidiLearn:
def __init__(self, callback):
self.callback = callback
self.started = False
self.current_pitch = -1
self.scanner = CtlScan(self.scanned, True).stop()
self.notes = Notein(poly=1).stop()
self.notecall = TrigFunc(self.notes["trigon"], self.noteon).stop()
self.notecall2 = TrigFunc(self.notes["trigoff"], self.noteoff).stop()
def scan(self):
self.scanner.reset()
self.scanner.play()
self.notes.play()
self.notecall.play()
self.notecall2.play()
self.started = True
def stop(self):
self.scanner.stop()
self.notes.stop()
self.notecall.stop()
self.notecall2.stop()
self.started = False
def scanned(self, ctlnum):
if 0:
self.callback(ctlnum, ctl=True)
self.stop()
def noteon(self):
pit = int(self.notes.get("pitch"))
self.current_pitch = pit
self.callback(pit)
def noteoff(self):
pit = int(self.notes.get("pitch"))
if pit == self.current_pitch:
self.stop()
self.current_pitch = -1
def isStarted(self):
return self.started
|
mahabs/nitro
|
refs/heads/master
|
nssrc/com/citrix/netscaler/nitro/resource/config/cs/csvserver_spilloverpolicy_binding.py
|
1
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class csvserver_spilloverpolicy_binding(base_resource) :
""" Binding class showing the spilloverpolicy that can be bound to csvserver.
"""
def __init__(self) :
self._policyname = ""
self._gotopriorityexpression = ""
self._bindpoint = ""
self._priority = 0
self._name = ""
self._targetlbvserver = ""
self._invoke = False
self._labeltype = ""
self._labelname = ""
self.___count = 0
@property
def priority(self) :
"""Priority for the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""Priority for the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def bindpoint(self) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
"""The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def policyname(self) :
"""Policies bound to this vserver.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
"""Policies bound to this vserver.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
@property
def labelname(self) :
"""Name of the label to be invoked.
"""
try :
return self._labelname
except Exception as e:
raise e
@labelname.setter
def labelname(self, labelname) :
"""Name of the label to be invoked.
"""
try :
self._labelname = labelname
except Exception as e:
raise e
@property
def name(self) :
"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the content switching virtual server to which the content switching policy applies.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def targetlbvserver(self) :
"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver.
"""
try :
return self._targetlbvserver
except Exception as e:
raise e
@targetlbvserver.setter
def targetlbvserver(self, targetlbvserver) :
"""Name of the Load Balancing virtual server to which the content is switched, if policy rule is evaluated to be TRUE.
Example: bind cs vs cs1 -policyname pol1 -priority 101 -targetLBVserver lb1
Note: Use this parameter only in case of Content Switching policy bind operations to a CS vserver
"""
try :
self._targetlbvserver = targetlbvserver
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def invoke(self) :
"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
return self._invoke
except Exception as e:
raise e
@invoke.setter
def invoke(self, invoke) :
"""Invoke a policy label if this policy's rule evaluates to TRUE (valid only for default-syntax policies such as application firewall, transform, integrated cache, rewrite, responder, and content switching).
"""
try :
self._invoke = invoke
except Exception as e:
raise e
@property
def labeltype(self) :
"""Type of label to be invoked.
"""
try :
return self._labeltype
except Exception as e:
raise e
@labeltype.setter
def labeltype(self, labeltype) :
"""Type of label to be invoked.
"""
try :
self._labeltype = labeltype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(csvserver_spilloverpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.csvserver_spilloverpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = csvserver_spilloverpolicy_binding()
updateresource.name = resource.name
updateresource.policyname = resource.policyname
updateresource.targetlbvserver = resource.targetlbvserver
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
updateresource.invoke = resource.invoke
updateresource.labeltype = resource.labeltype
updateresource.labelname = resource.labelname
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [csvserver_spilloverpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policyname = resource[i].policyname
updateresources[i].targetlbvserver = resource[i].targetlbvserver
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
updateresources[i].invoke = resource[i].invoke
updateresources[i].labeltype = resource[i].labeltype
updateresources[i].labelname = resource[i].labelname
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = csvserver_spilloverpolicy_binding()
deleteresource.name = resource.name
deleteresource.policyname = resource.policyname
deleteresource.bindpoint = resource.bindpoint
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [csvserver_spilloverpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policyname = resource[i].policyname
deleteresources[i].bindpoint = resource[i].bindpoint
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch csvserver_spilloverpolicy_binding resources.
"""
try :
obj = csvserver_spilloverpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of csvserver_spilloverpolicy_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_spilloverpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count csvserver_spilloverpolicy_binding resources configued on NetScaler.
"""
try :
obj = csvserver_spilloverpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of csvserver_spilloverpolicy_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = csvserver_spilloverpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
class Labeltype:
reqvserver = "reqvserver"
resvserver = "resvserver"
policylabel = "policylabel"
class csvserver_spilloverpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.csvserver_spilloverpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.csvserver_spilloverpolicy_binding = [csvserver_spilloverpolicy_binding() for _ in range(length)]
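# Editor's note: an illustrative fetch of these bindings; the host, credentials
# and vserver name below are placeholders, and the nitro_service setup is an
# assumption rather than part of this module.
#
#     from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#     client = nitro_service("10.0.0.1", "http")
#     client.login("nsroot", "nsroot")
#     bindings = csvserver_spilloverpolicy_binding.get(client, "cs_vip_1")
#     total = csvserver_spilloverpolicy_binding.count(client, "cs_vip_1")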
|
geminy/aidear
|
refs/heads/master
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/tools/grit/grit/scons.py
|
62
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''SCons integration for GRIT.
'''
# NOTE: DO NOT IMPORT ANY GRIT STUFF HERE - we import lazily so that
# grit and its dependencies aren't imported until actually needed.
import os
import types
def _IsDebugEnabled():
return 'GRIT_DEBUG' in os.environ and os.environ['GRIT_DEBUG'] == '1'
def _SourceToFile(source):
'''Return the path to the source file, given the 'source' argument as provided
by SCons to the _Builder or _Emitter functions.
'''
# Get the filename of the source. The 'source' parameter can be a string,
# a "node", or a list of strings or nodes.
if isinstance(source, types.ListType):
source = str(source[0])
else:
source = str(source)
return source
def _ParseRcFlags(flags):
"""Gets a mapping of defines.
Args:
flags: env['RCFLAGS']; the input defines.
Returns:
A tuple of (defines, res_file):
defines: A mapping of {name: val}
res_file: None, or the specified res file for static file dependencies.
"""
from grit import util
defines = {}
res_file = None
# Get the CPP defines from the environment.
res_flag = '--res_file='
for flag in flags:
if flag.startswith(res_flag):
res_file = flag[len(res_flag):]
continue
if flag.startswith('/D'):
flag = flag[2:]
name, val = util.ParseDefine(flag)
# Only apply to first instance of a given define
if name not in defines:
defines[name] = val
return (defines, res_file)
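# Editor's note: an illustrative call to _ParseRcFlags; the exact value types
# in the returned mapping depend on grit.util.ParseDefine.
#
#     defines, res_file = _ParseRcFlags(['/DFOO=1', '--res_file=app.res'])
#     # defines  -> {'FOO': '1'} (roughly)
#     # res_file -> 'app.res'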
def _Builder(target, source, env):
print _SourceToFile(source)
from grit import grit_runner
from grit.tool import build
options = grit_runner.Options()
# This sets options to default values
options.ReadOptions([])
options.input = _SourceToFile(source)
# TODO(joi) Check if we can get the 'verbose' option from the environment.
builder = build.RcBuilder(defines=_ParseRcFlags(env['RCFLAGS'])[0])
# To ensure that our output files match what we promised SCons, we
# use the list of targets provided by SCons and update the file paths in
# our .grd input file with the targets.
builder.scons_targets = [str(t) for t in target]
builder.Run(options, [])
return None # success
def _GetOutputFiles(grd, base_dir):
"""Processes outputs listed in the grd into rc_headers and rc_alls.
Note that anything that's not an rc_header is classified as an rc_all.
Args:
grd: An open GRD reader.
Returns:
A tuple of (rc_headers, rc_alls, lang_folders):
rc_headers: Outputs marked as rc_header.
rc_alls: All other outputs.
lang_folders: The output language folders.
"""
rc_headers = []
rc_alls = []
lang_folders = {}
# Explicit output files.
for output in grd.GetOutputFiles():
path = os.path.join(base_dir, output.GetFilename())
if (output.GetType() == 'rc_header'):
rc_headers.append(path)
else:
rc_alls.append(path)
if _IsDebugEnabled():
print 'GRIT: Added target %s' % path
if output.attrs['lang'] != '':
lang_folders[output.attrs['lang']] = os.path.dirname(path)
return (rc_headers, rc_alls, lang_folders)
def _ProcessNodes(grd, base_dir, lang_folders):
"""Processes the GRD nodes to figure out file dependencies.
Args:
grd: An open GRD reader.
base_dir: The base directory for filenames.
    lang_folders: The output language folders.
Returns:
A tuple of (structure_outputs, translated_files, static_files):
structure_outputs: Structures marked as sconsdep.
translated_files: Files that are structures or skeletons, and get
translated by GRIT.
static_files: Files that are includes, and are used directly by res files.
"""
structure_outputs = []
translated_files = []
static_files = []
# Go through nodes, figuring out resources. Also output certain resources
# as build targets, based on the sconsdep flag.
for node in grd.ActiveDescendants():
with node:
file = node.ToRealPath(node.GetInputPath())
if node.name == 'structure':
translated_files.append(os.path.abspath(file))
# TODO(joi) Should remove the "if sconsdep is true" thing as it is a
# hack - see grit/node/structure.py
if node.HasFileForLanguage() and node.attrs['sconsdep'] == 'true':
for lang in lang_folders:
path = node.FileForLanguage(lang, lang_folders[lang],
create_file=False,
return_if_not_generated=False)
if path:
structure_outputs.append(path)
if _IsDebugEnabled():
print 'GRIT: Added target %s' % path
elif (node.name == 'skeleton' or (node.name == 'file' and node.parent and
node.parent.name == 'translations')):
translated_files.append(os.path.abspath(file))
elif node.name == 'include':
# If it's added by file name and the file isn't easy to find, don't make
# it a dependency. This could add some build flakiness, but it doesn't
# work otherwise.
if node.attrs['filenameonly'] != 'true' or os.path.exists(file):
static_files.append(os.path.abspath(file))
# If it's output from mk, look in the output directory.
elif node.attrs['mkoutput'] == 'true':
static_files.append(os.path.join(base_dir, os.path.basename(file)))
return (structure_outputs, translated_files, static_files)
def _SetDependencies(env, base_dir, res_file, rc_alls, translated_files,
static_files):
"""Sets dependencies in the environment.
Args:
env: The SCons environment.
base_dir: The base directory for filenames.
res_file: The res_file specified in the RC flags.
rc_alls: All non-rc_header outputs.
translated_files: Files that are structures or skeletons, and get
translated by GRIT.
static_files: Files that are includes, and are used directly by res files.
"""
if res_file:
env.Depends(os.path.join(base_dir, res_file), static_files)
else:
# Make a best effort dependency setup when no res file is specified.
translated_files.extend(static_files)
for rc_all in rc_alls:
env.Depends(rc_all, translated_files)
def _Emitter(target, source, env):
"""Modifies the list of targets to include all outputs.
Note that this also sets up the dependencies, even though it's an emitter
rather than a scanner. This is so that the resource header file doesn't show
as having dependencies.
Args:
target: The list of targets to emit for.
source: The source or list of sources for the target.
env: The SCons environment.
Returns:
A tuple of (targets, sources).
"""
from grit import grd_reader
from grit import util
(defines, res_file) = _ParseRcFlags(env['RCFLAGS'])
grd = grd_reader.Parse(_SourceToFile(source), debug=_IsDebugEnabled())
# TODO(jperkins): This is a hack to get an output context set for the reader.
# This should really be smarter about the language.
grd.SetOutputLanguage('en')
grd.SetDefines(defines)
base_dir = util.dirname(str(target[0]))
(rc_headers, rc_alls, lang_folders) = _GetOutputFiles(grd, base_dir)
(structure_outputs, translated_files, static_files) = _ProcessNodes(grd,
base_dir, lang_folders)
rc_alls.extend(structure_outputs)
_SetDependencies(env, base_dir, res_file, rc_alls, translated_files,
static_files)
targets = rc_headers
targets.extend(rc_alls)
# Return target and source lists.
return (targets, source)
# Function name is mandated by newer versions of SCons.
def generate(env):
# Importing this module should be possible whenever this function is invoked
# since it should only be invoked by SCons.
import SCons.Builder
import SCons.Action
# The varlist parameter tells SCons that GRIT needs to be invoked again
# if RCFLAGS has changed since last compilation.
build_action = SCons.Action.FunctionAction(_Builder, varlist=['RCFLAGS'])
emit_action = SCons.Action.FunctionAction(_Emitter, varlist=['RCFLAGS'])
builder = SCons.Builder.Builder(action=build_action, emitter=emit_action,
src_suffix='.grd')
# Add our builder and scanner to the environment.
env.Append(BUILDERS = {'GRIT': builder})
# Function name is mandated by newer versions of SCons.
def exists(env):
return 1
|
kalxas/geonode
|
refs/heads/master
|
geonode/documents/api/__init__.py
|
42
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2020 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
|
hfeeki/cmdln
|
refs/heads/master
|
test/cmdln_helloworld.py
|
1
|
#!/usr/bin/env python
"""
$ python cmdln_helloworld.py #expecttest: INTERACTIVE, PROMPT="hi> "
hi> hi
Hi, stranger!
hi> hi Trent
Hi, Trent!
hi> ^D
$ python cmdln_helloworld.py hi Guido
Hi, Guido!
"""
import sys
import cmdln
class HelloWorld(cmdln.RawCmdln):
prompt = "hi> "
def do_hi(self, argv):
"""say hi"""
name = len(argv)>1 and argv[1] or "stranger"
print("Hi, %s!" % name)
if __name__ == "__main__":
sys.exit(HelloWorld().main(loop=cmdln.LOOP_IF_EMPTY))
|
janebeckman/gpdb
|
refs/heads/master
|
gpMgmt/bin/gppylib/test/unit/test_unit_compare_segment_guc.py
|
16
|
from mock import *
from gp_unittest import *
from gpconfig_modules.compare_segment_guc import MultiValueGuc
from gpconfig_modules.database_segment_guc import DatabaseSegmentGuc
from gpconfig_modules.file_segment_guc import FileSegmentGuc
class CompareSegmentGucTest(GpTestCase):
def setUp(self):
row = ['contentid', 'guc_name', 'file_value', "dbid"]
self.file_seg_guc = FileSegmentGuc(row)
row = ['contentid', 'guc_name', 'sql_value']
self.db_seg_guc = DatabaseSegmentGuc(row)
self.subject = MultiValueGuc(self.file_seg_guc, self.db_seg_guc)
def test_init_when_comparison_guc_supplied(self):
row = ['contentid', 'guc_name', 'file_value', "diff_dbid"]
file_seg_guc = FileSegmentGuc(row)
old = self.subject
self.subject = MultiValueGuc(self.subject, file_seg_guc)
self.assertEquals(self.subject.db_seg_guc, old.db_seg_guc)
self.assertEquals(self.subject.primary_file_seg_guc, old.primary_file_seg_guc)
self.assertEquals(self.subject.mirror_file_seg_guc, file_seg_guc)
def test_init_with_wrong_content_id_raises(self):
row = ['contentid', 'guc_name', 'file_value', "dbid"]
file_seg_guc = FileSegmentGuc(row)
row = ['different', 'guc_name', 'sql_value']
db_seg_guc = DatabaseSegmentGuc(row)
with self.assertRaisesRegexp(Exception, "Not the same context"):
MultiValueGuc(file_seg_guc, db_seg_guc)
def test_init_handles_both_orders(self):
self.assertEquals(self.file_seg_guc, self.subject.primary_file_seg_guc)
self.assertEquals(self.db_seg_guc, self.subject.db_seg_guc)
self.assertTrue(isinstance(self.subject.primary_file_seg_guc, FileSegmentGuc))
self.assertTrue(isinstance(self.subject.db_seg_guc, DatabaseSegmentGuc))
self.subject = MultiValueGuc(self.db_seg_guc, self.file_seg_guc)
self.assertEquals(self.file_seg_guc, self.subject.primary_file_seg_guc)
self.assertEquals(self.db_seg_guc, self.subject.db_seg_guc)
self.assertTrue(isinstance(self.subject.primary_file_seg_guc, FileSegmentGuc))
self.assertTrue(isinstance(self.subject.db_seg_guc, DatabaseSegmentGuc))
def test_init_when_none_raises(self):
with self.assertRaisesRegexp(Exception, "comparison requires two gucs"):
self.subject = MultiValueGuc(self.db_seg_guc, None)
with self.assertRaisesRegexp(Exception, "comparison requires two gucs"):
self.subject = MultiValueGuc(None, self.db_seg_guc)
def test_report_fail_format_for_database_and_file_gucs(self):
self.assertEquals(self.subject.report_fail_format(),
["[context: contentid] [dbid: dbid] [name: guc_name] [value: sql_value | file: file_value]"])
def test_report_fail_format_file_segment_guc_only(self):
self.subject.db_seg_guc = None
row = ['contentid', 'guc_name', 'primary_value', "dbid1"]
self.subject.set_primary_file_segment(FileSegmentGuc(row))
row = ['contentid', 'guc_name', 'mirror_value', "dbid2"]
self.subject.set_mirror_file_segment(FileSegmentGuc(row))
self.assertEquals(self.subject.report_fail_format(),
["[context: contentid] [dbid: dbid1] [name: guc_name] [value: primary_value]",
"[context: contentid] [dbid: dbid2] [name: guc_name] [value: mirror_value]"])
def test_when_segment_report_success_format(self):
self.assertEquals(self.subject.report_success_format(),
"Segment value: sql_value | file: file_value")
def test_when_values_match_report_success_format_file_compare(self):
self.subject.db_seg_guc.value = 'value'
self.subject.primary_file_seg_guc.value = 'value'
self.assertEquals(self.subject.report_success_format(), "Segment value: value | file: value")
def test_is_internally_consistent_fails(self):
self.assertEquals(self.subject.is_internally_consistent(), False)
def test_is_internally_consistent_when_file_value_is_none_succeeds(self):
self.file_seg_guc.value = None
self.assertEquals(self.subject.is_internally_consistent(), True)
def test_is_internally_consistent_when_primary_is_same_succeeds(self):
self.subject.primary_file_seg_guc.value = "sql_value"
self.assertEquals(self.subject.is_internally_consistent(), True)
def test_is_internally_consistent_when_mirror_is_different_fails(self):
self.subject.primary_file_seg_guc.value = "sql_value"
row = ['contentid', 'guc_name', 'diffvalue', "dbid1"]
self.subject.set_mirror_file_segment(FileSegmentGuc(row))
self.assertEquals(self.subject.is_internally_consistent(), False)
def test_set_file_segment_succeeds(self):
row = ['contentid', 'guc_name', 'file_value', "diff_dbid"]
file_seg_guc = FileSegmentGuc(row)
self.subject.set_mirror_file_segment(file_seg_guc)
self.assertEquals(self.subject.mirror_file_seg_guc, file_seg_guc)
def test_get_value_returns_unique(self):
self.assertEquals(self.subject.get_value(), "sql_value||file_value")
|
imsparsh/pyo
|
refs/heads/master
|
doc-sphinx/source/api/classes/get_templates.py
|
11
|
from pyo import *
cats = OBJECTS_TREE['PyoObjectBase']['PyoTableObject']
cats.sort()
module = """%s
===================================
.. module:: pyo
"""
template = """*%s*
-----------------------------------
.. autoclass:: %s
:members:
"""
with open("tables.rst", "w") as f:
f.write(module % "Tables")
for obj in cats:
f.write(template % (obj, obj))
|
CMPUT404Proj/CMPUT404-project-socialdistribution
|
refs/heads/master
|
social_dist/api/models.py
|
10644
|
from django.db import models
# Create your models here.
|
hustlzp/eve
|
refs/heads/develop
|
eve/tests/io/__init__.py
|
14224
|
# -*- coding: utf-8 -*-
|
neoareslinux/neutron
|
refs/heads/master
|
neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py
|
11
|
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.common import ovs_lib
from neutron.agent.l2.extensions import qos
from neutron.i18n import _LW
from neutron.plugins.ml2.drivers.openvswitch.mech_driver import (
mech_openvswitch)
LOG = logging.getLogger(__name__)
class QosOVSAgentDriver(qos.QosAgentDriver):
_SUPPORTED_RULES = (
mech_openvswitch.OpenvswitchMechanismDriver.supported_qos_rule_types)
def __init__(self):
super(QosOVSAgentDriver, self).__init__()
self.br_int_name = cfg.CONF.OVS.integration_bridge
self.br_int = None
def initialize(self):
self.br_int = ovs_lib.OVSBridge(self.br_int_name)
def create(self, port, qos_policy):
self._handle_rules('create', port, qos_policy)
def update(self, port, qos_policy):
self._handle_rules('update', port, qos_policy)
def delete(self, port, qos_policy):
# TODO(QoS): consider optimizing flushing of all QoS rules from the
# port by inspecting qos_policy.rules contents
self._delete_bandwidth_limit(port)
def _handle_rules(self, action, port, qos_policy):
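        # Dispatch dynamically: for example, action='create' combined with a
        # 'bandwidth_limit' rule resolves to self._create_bandwidth_limit(port, rule).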
for rule in qos_policy.rules:
if rule.rule_type in self._SUPPORTED_RULES:
handler_name = ("".join(("_", action, "_", rule.rule_type)))
handler = getattr(self, handler_name)
handler(port, rule)
else:
LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: '
'%(rule_type)s; skipping'),
{'rule_id': rule.id, 'rule_type': rule.rule_type})
def _create_bandwidth_limit(self, port, rule):
self._update_bandwidth_limit(port, rule)
def _update_bandwidth_limit(self, port, rule):
port_name = port['vif_port'].port_name
max_kbps = rule.max_kbps
max_burst_kbps = rule.max_burst_kbps
self.br_int.create_egress_bw_limit_for_port(port_name,
max_kbps,
max_burst_kbps)
def _delete_bandwidth_limit(self, port):
port_name = port['vif_port'].port_name
self.br_int.delete_egress_bw_limit_for_port(port_name)
|
henriknelson/micropython
|
refs/heads/master
|
tools/tinytest-codegen.py
|
7
|
#!/usr/bin/env python3
import os, sys
from glob import glob
from re import sub
import argparse
def escape(s):
s = s.decode()
lookup = {
'\0': '\\0',
'\t': '\\t',
'\n': '\\n\"\n\"',
'\r': '\\r',
'\\': '\\\\',
'\"': '\\\"',
}
return "\"\"\n\"{}\"".format(''.join([lookup[x] if x in lookup else x for x in s]))
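# Worked example: escape(b'hi\n') yields the C literal
#   ""
#   "hi\n"
#   ""
# i.e. adjacent string fragments that the C compiler concatenates back together.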
def chew_filename(t):
return { 'func': "test_{}_fn".format(sub(r'/|\.|-', '_', t)), 'desc': t }
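# e.g. chew_filename('basics/int_big.py') returns
#   {'func': 'test_basics_int_big_py_fn', 'desc': 'basics/int_big.py'}
# (the example filename is only illustrative).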
def script_to_map(test_file):
r = {"name": chew_filename(test_file)["func"]}
with open(test_file, "rb") as f:
r["script"] = escape(f.read())
with open(test_file + ".exp", "rb") as f:
r["output"] = escape(f.read())
return r
test_function = (
"void {name}(void* data) {{\n"
" static const char pystr[] = {script};\n"
" static const char exp[] = {output};\n"
" upytest_set_expected_output(exp, sizeof(exp) - 1);\n"
" upytest_execute_test(pystr);\n"
"}}"
)
testcase_struct = (
"struct testcase_t {name}_tests[] = {{\n{body}\n END_OF_TESTCASES\n}};"
)
testcase_member = (
" {{ \"{desc}\", {func}, TT_ENABLED_, 0, 0 }},"
)
testgroup_struct = (
"struct testgroup_t groups[] = {{\n{body}\n END_OF_GROUPS\n}};"
)
testgroup_member = (
" {{ \"{name}\", {name}_tests }},"
)
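# Taken together, the templates above expand to one C function per test script,
# a single `_tests` testcase array, and a `groups` table that registers it with
# the tinytest/upytesthelper runner.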
## XXX: maybe we could have a `--without <groups>` argument...
# currently these tests are selected because they pass on qemu-arm
test_dirs = ('basics', 'micropython', 'float', 'extmod', 'inlineasm') # 'import', 'io', 'misc')
exclude_tests = (
# pattern matching in .exp
'basics/bytes_compare3.py',
'extmod/ticks_diff.py',
'extmod/time_ms_us.py',
'extmod/uheapq_timeq.py',
# unicode char issue
'extmod/ujson_loads.py',
# doesn't output to python stdout
'extmod/ure_debug.py',
'extmod/vfs_basic.py',
'extmod/vfs_fat_ramdisk.py', 'extmod/vfs_fat_fileio.py',
'extmod/vfs_fat_fsusermount.py', 'extmod/vfs_fat_oldproto.py',
# rounding issues
'float/float_divmod.py',
# requires double precision floating point to work
'float/float2int_doubleprec_intbig.py',
'float/float_parse_doubleprec.py',
# inline asm FP tests (require Cortex-M4)
'inlineasm/asmfpaddsub.py', 'inlineasm/asmfpcmp.py', 'inlineasm/asmfpldrstr.py',
'inlineasm/asmfpmuldiv.py','inlineasm/asmfpsqrt.py',
# different filename in output
'micropython/emg_exc.py',
'micropython/heapalloc_traceback.py',
# pattern matching in .exp
'micropython/meminfo.py',
)
output = []
tests = []
argparser = argparse.ArgumentParser(description='Convert native MicroPython tests to tinytest/upytesthelper C code')
argparser.add_argument('--stdin', action="store_true", help='read list of tests from stdin')
args = argparser.parse_args()
if not args.stdin:
for group in test_dirs:
tests += [test for test in glob('{}/*.py'.format(group)) if test not in exclude_tests]
else:
for l in sys.stdin:
tests.append(l.rstrip())
output.extend([test_function.format(**script_to_map(test)) for test in tests])
testcase_members = [testcase_member.format(**chew_filename(test)) for test in tests]
output.append(testcase_struct.format(name="", body='\n'.join(testcase_members)))
testgroup_members = [testgroup_member.format(name=group) for group in [""]]
output.append(testgroup_struct.format(body='\n'.join(testgroup_members)))
## XXX: maybe we could have an `--output <filename>` argument...
# Don't depend on what system locale is set, use utf8 encoding.
sys.stdout.buffer.write('\n\n'.join(output).encode('utf8'))
|
MalloyPower/parsing-python
|
refs/heads/master
|
front-end/testsuite-python-lib/Python-3.6.0/Lib/test/test_importlib/extension/test_case_sensitivity.py
|
8
|
from importlib import _bootstrap_external
from test import support
import unittest
from .. import util
importlib = util.import_importlib('importlib')
machinery = util.import_importlib('importlib.machinery')
@unittest.skipIf(util.EXTENSIONS.filename is None, '_testcapi not available')
@util.case_insensitive_tests
class ExtensionModuleCaseSensitivityTest(util.CASEOKTestBase):
def find_module(self):
good_name = util.EXTENSIONS.name
bad_name = good_name.upper()
assert good_name != bad_name
finder = self.machinery.FileFinder(util.EXTENSIONS.path,
(self.machinery.ExtensionFileLoader,
self.machinery.EXTENSION_SUFFIXES))
return finder.find_module(bad_name)
def test_case_sensitive(self):
with support.EnvironmentVarGuard() as env:
env.unset('PYTHONCASEOK')
self.caseok_env_changed(should_exist=False)
loader = self.find_module()
self.assertIsNone(loader)
def test_case_insensitivity(self):
with support.EnvironmentVarGuard() as env:
env.set('PYTHONCASEOK', '1')
self.caseok_env_changed(should_exist=True)
loader = self.find_module()
self.assertTrue(hasattr(loader, 'load_module'))
(Frozen_ExtensionCaseSensitivity,
Source_ExtensionCaseSensitivity
) = util.test_both(ExtensionModuleCaseSensitivityTest, importlib=importlib,
machinery=machinery)
if __name__ == '__main__':
unittest.main()
|
neerajvashistha/pa-dude
|
refs/heads/master
|
lib/python2.7/site-packages/snowballstemmer/basestemmer.py
|
18
|
class BaseStemmer(object):
def __init__(self):
self.set_current("")
self.maxCacheSize = 10000
self._cache = {}
self._counter = 0
def set_current(self, value):
'''
Set the self.current string.
'''
self.current = value
self.cursor = 0
self.limit = len(self.current)
self.limit_backward = 0
self.bra = self.cursor
self.ket = self.limit
def get_current(self):
'''
Get the self.current string.
'''
return self.current
def copy_from(self, other):
self.current = other.current
self.cursor = other.cursor
self.limit = other.limit
self.limit_backward = other.limit_backward
self.bra = other.bra
self.ket = other.ket
def in_grouping(self, s, min, max):
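        # s is a packed bit set covering the code points [min, max]: character ch
        # is a member when bit ((ch - min) & 0x7) of byte s[(ch - min) >> 3] is set.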
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if ch > max or ch < min:
return False
ch -= min
if (s[ch >> 3] & (0x1 << (ch & 0x7))) == 0:
return False
self.cursor += 1
return True
def in_grouping_b(self, s, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if ch > max or ch < min:
return False
ch -= min
if (s[ch >> 3] & (0x1 << (ch & 0x7))) == 0:
return False
self.cursor -= 1
return True
def out_grouping(self, s, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if ch > max or ch < min:
self.cursor += 1
return True
ch -= min
if (s[ch >> 3] & (0X1 << (ch & 0x7))) == 0:
self.cursor += 1
return True
return False
def out_grouping_b(self, s, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if ch > max or ch < min:
self.cursor -= 1
return True
ch -= min
if (s[ch >> 3] & (0X1 << (ch & 0x7))) == 0:
self.cursor -= 1
return True
return False
def in_range(self, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if ch > max or ch < min:
return False
self.cursor += 1
return True
def in_range_b(self, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if ch > max or ch < min:
return False
self.cursor -= 1
return True
def out_range(self, min, max):
if self.cursor >= self.limit:
return False
ch = ord(self.current[self.cursor])
if not (ch > max or ch < min):
return False
self.cursor += 1
return True
def out_range_b(self, min, max):
if self.cursor <= self.limit_backward:
return False
ch = ord(self.current[self.cursor - 1])
if not (ch > max or ch < min):
return False
self.cursor -= 1
return True
def eq_s(self, s_size, s):
if self.limit - self.cursor < s_size:
return False
if self.current[self.cursor:self.cursor + s_size] != s:
return False
self.cursor += s_size
return True
def eq_s_b(self, s_size, s):
if self.cursor - self.limit_backward < s_size:
return False
if self.current[self.cursor - s_size:self.cursor] != s:
return False
self.cursor -= s_size
return True
def eq_v(self, s):
return self.eq_s(len(s), s)
def eq_v_b(self, s):
return self.eq_s_b(len(s), s)
def find_among(self, v, v_size):
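        # Binary search of the Among table v (v_size entries, sorted by string)
        # against self.current at the cursor; on a match the cursor is advanced
        # past the matched string and that entry's result code is returned, else 0.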
i = 0
j = v_size
c = self.cursor
l = self.limit
common_i = 0
common_j = 0
first_key_inspected = False
while True:
k = i + ((j - i) >> 1)
diff = 0
            common = min(common_i, common_j)  # smaller of the two
w = v[k]
for i2 in range(common, w.s_size):
if c + common == l:
diff = -1
break
diff = ord(self.current[c + common]) - ord(w.s[i2])
if diff != 0:
break
common += 1
if diff < 0:
j = k
common_j = common
else:
i = k
common_i = common
if j - i <= 1:
if i > 0:
break # v->s has been inspected
if j == i:
break # only one item in v
# - but now we need to go round once more to get
# v->s inspected. self looks messy, but is actually
# the optimal approach.
if first_key_inspected:
break
first_key_inspected = True
while True:
w = v[i]
if common_i >= w.s_size:
self.cursor = c + w.s_size
if w.method is None:
return w.result
method = getattr(self, w.method)
res = method()
self.cursor = c + w.s_size
if res:
return w.result
i = w.substring_i
if i < 0:
return 0
return -1 # not reachable
def find_among_b(self, v, v_size):
'''
find_among_b is for backwards processing. Same comments apply
'''
i = 0
j = v_size
c = self.cursor
lb = self.limit_backward;
common_i = 0
common_j = 0
first_key_inspected = False
while True:
k = i + ((j - i) >> 1)
diff = 0
common = min(common_i, common_j)
w = v[k]
for i2 in range(w.s_size - 1 - common, -1, -1):
if c - common == lb:
diff = -1
break
diff = ord(self.current[c - 1 - common]) - ord(w.s[i2])
if diff != 0:
break
common += 1
if diff < 0:
j = k
common_j = common
else:
i = k
common_i = common
if j - i <= 1:
if i > 0:
break
if j == i:
break
if first_key_inspected:
break
first_key_inspected = True
while True:
w = v[i]
if common_i >= w.s_size:
self.cursor = c - w.s_size
if w.method is None:
return w.result
method = getattr(self, w.method)
res = method()
self.cursor = c - w.s_size
if res:
return w.result
i = w.substring_i
if i < 0:
return 0
return -1 # not reachable
def replace_s(self, c_bra, c_ket, s):
'''
        Replace the characters between c_bra and c_ket in self.current
        with the characters in s.
@type c_bra int
@type c_ket int
@type s: string
'''
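        # Worked example: with self.current == "running", replace_s(4, 7, "")
        # leaves self.current == "runn" and returns an adjustment of -3.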
adjustment = len(s) - (c_ket - c_bra)
self.current = self.current[0:c_bra] + s + self.current[c_ket:]
self.limit += adjustment
if self.cursor >= c_ket:
self.cursor += adjustment
elif self.cursor > c_bra:
self.cursor = c_bra
return adjustment
def slice_check(self):
if self.bra < 0 or self.bra > self.ket or self.ket > self.limit or self.limit > len(self.current):
return False
return True
def slice_from(self, s):
'''
@type s string
'''
result = False
if self.slice_check():
self.replace_s(self.bra, self.ket, s)
result = True
return result
def slice_del(self):
return self.slice_from("")
def insert(self, c_bra, c_ket, s):
'''
@type c_bra int
@type c_ket int
@type s: string
'''
adjustment = self.replace_s(c_bra, c_ket, s)
if c_bra <= self.bra:
self.bra += adjustment
if c_bra <= self.ket:
self.ket += adjustment
def slice_to(self, s):
'''
        Return the [bra, ket) slice of self.current (the s argument is not used).
@type s: string
'''
result = ''
if self.slice_check():
result = self.current[self.bra:self.ket]
return result
def assign_to(self, s):
'''
@type s: string
'''
return self.current[0:self.limit]
def _stem_word(self, word):
cache = self._cache.get(word)
if cache is None:
self.set_current(word)
self._stem()
result = self.get_current()
self._cache[word] = [result, self._counter]
else:
cache[1] = self._counter
result = cache[0]
self._counter += 1
return result
def _clear_cache(self):
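        # Evict the least recently used entries (lowest usage counter) until the
        # cache is back down to roughly 80% of maxCacheSize.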
removecount = int(len(self._cache) - self.maxCacheSize * 8 / 10)
oldcaches = sorted(self._cache.items(), key=lambda cache: cache[1][1])[0:removecount]
for key, value in oldcaches:
del self._cache[key]
def stemWord(self, word):
result = self._stem_word(word)
if len(self._cache) > self.maxCacheSize:
self._clear_cache()
return result
def stemWords(self, words):
result = [self._stem_word(word) for word in words]
if len(self._cache) > self.maxCacheSize:
self._clear_cache()
return result
|
gtaylor/ansible
|
refs/heads/devel
|
v2/ansible/plugins/lookup/redis_kv.py
|
69
|
# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
HAVE_REDIS=False
try:
import redis # https://github.com/andymccurdy/redis-py/
HAVE_REDIS=True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
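# Example (hypothetical playbook usage, assuming a local Redis holding "mykey"):
#   - debug: msg="{{ lookup('redis_kv', 'redis://localhost:6379,mykey') }}"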
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not HAVE_REDIS:
raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
if not isinstance(terms, list):
terms = [ terms ]
ret = []
for term in terms:
(url,key) = term.split(',')
if url == "":
url = 'redis://localhost:6379'
# urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
# Redis' from_url() doesn't work here.
p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
try:
m = re.search(p, url)
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
raise AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
res = conn.get(key)
if res is None:
res = ""
ret.append(res)
except:
ret.append("") # connection failed or key not found
return ret
|
Accelerite/cinder
|
refs/heads/master
|
cinder/tests/api/contrib/test_quotas_classes.py
|
14
|
# Copyright 2013 Huawei Technologies Co., Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for cinder.api.contrib.quota_classes.py
"""
from lxml import etree
import webob.exc
from cinder.api.contrib import quota_classes
from cinder import context
from cinder import quota
from cinder import test
from cinder.volume import volume_types
QUOTAS = quota.QUOTAS
def make_body(root=True, gigabytes=1000, snapshots=10,
volumes=10, backups=10,
backup_gigabytes=1000,
volume_types_faked=None,
tenant_id='foo'):
resources = {'gigabytes': gigabytes,
'snapshots': snapshots,
'volumes': volumes,
'backups': backups,
'backup_gigabytes': backup_gigabytes}
if not volume_types_faked:
volume_types_faked = {'fake_type': None}
for volume_type in volume_types_faked:
resources['gigabytes_' + volume_type] = -1
resources['snapshots_' + volume_type] = -1
resources['volumes_' + volume_type] = -1
if tenant_id:
resources['id'] = tenant_id
if root:
result = {'quota_class_set': resources}
else:
result = resources
return result
def make_response_body(root=True, ctxt=None, quota_class='foo',
request_body=None, tenant_id='foo'):
resources = {}
if not ctxt:
ctxt = context.get_admin_context()
resources.update(QUOTAS.get_class_quotas(ctxt, quota_class))
    if request_body and 'quota_class_set' in request_body:
resources.update(request_body['quota_class_set'])
if tenant_id:
resources['id'] = tenant_id
if root:
result = {'quota_class_set': resources}
else:
result = resources
return result
class QuotaClassSetsControllerTest(test.TestCase):
def setUp(self):
super(QuotaClassSetsControllerTest, self).setUp()
self.controller = quota_classes.QuotaClassSetsController()
self.ctxt = context.get_admin_context()
self.req = self.mox.CreateMockAnything()
self.req.environ = {'cinder.context': self.ctxt}
self.req.environ['cinder.context'].is_admin = True
def test_show(self):
volume_types.create(self.ctxt, 'fake_type')
result = self.controller.show(self.req, 'foo')
self.assertDictMatch(result, make_body())
def test_show_not_authorized(self):
self.req.environ['cinder.context'].is_admin = False
self.req.environ['cinder.context'].user_id = 'bad_user'
self.req.environ['cinder.context'].project_id = 'bad_project'
self.assertRaises(webob.exc.HTTPForbidden, self.controller.show,
self.req, 'foo')
def test_update(self):
volume_types.create(self.ctxt, 'fake_type')
body = make_body(gigabytes=2000, snapshots=15,
volumes=5, tenant_id=None)
result = self.controller.update(self.req, 'foo', body)
self.assertDictMatch(result, body)
def test_update_wrong_key(self):
volume_types.create(self.ctxt, 'fake_type')
body = {'quota_class_set': {'bad': 'bad'}}
result = self.controller.update(self.req, 'foo', body)
self.assertDictMatch(result, make_body(tenant_id=None))
def test_update_invalid_key_value(self):
body = {'quota_class_set': {'gigabytes': "should_be_int"}}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'foo', body)
def test_update_bad_quota_limit(self):
body = {'quota_class_set': {'gigabytes': -1000}}
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
self.req, 'foo', body)
def test_update_no_admin(self):
self.req.environ['cinder.context'].is_admin = False
self.assertRaises(webob.exc.HTTPForbidden, self.controller.update,
self.req, 'foo', make_body(tenant_id=None))
def test_update_with_more_volume_types(self):
volume_types.create(self.ctxt, 'fake_type_1')
volume_types.create(self.ctxt, 'fake_type_2')
body = {'quota_class_set': {'gigabytes_fake_type_1': 1111,
'volumes_fake_type_2': 2222}}
result = self.controller.update(self.req, 'foo', body)
self.assertDictMatch(result, make_response_body(ctxt=self.ctxt,
quota_class='foo',
request_body=body,
tenant_id=None))
class QuotaClassesSerializerTest(test.TestCase):
def setUp(self):
super(QuotaClassesSerializerTest, self).setUp()
self.req = self.mox.CreateMockAnything()
self.req.environ = {'cinder.context': context.get_admin_context()}
def test_update_serializer(self):
serializer = quota_classes.QuotaClassTemplate()
quota_class_set = make_body(root=False)
text = serializer.serialize({'quota_class_set': quota_class_set})
tree = etree.fromstring(text)
self.assertEqual(tree.tag, 'quota_class_set')
self.assertEqual(tree.get('id'), quota_class_set['id'])
body = make_body(root=False, tenant_id=None)
for node in tree:
self.assertIn(node.tag, body)
self.assertEqual(str(body[node.tag]), node.text)
|
anadigi/paramiko
|
refs/heads/master
|
tests/test_sftp_big.py
|
25
|
# Copyright (C) 2003-2009 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Some unit tests to make sure sftp works well with large files.
A real sftp server is contacted, and a new folder is created there to
do test file operations in (so no existing files will be harmed).
"""
import os
import random
import struct
import sys
import time
import unittest
from paramiko.common import o660
from tests.test_sftp import get_sftp
FOLDER = os.environ.get('TEST_FOLDER', 'temp-testing000')
class BigSFTPTest (unittest.TestCase):
def setUp(self):
global FOLDER
sftp = get_sftp()
for i in range(1000):
FOLDER = FOLDER[:-3] + '%03d' % i
try:
sftp.mkdir(FOLDER)
break
except (IOError, OSError):
pass
def tearDown(self):
sftp = get_sftp()
sftp.rmdir(FOLDER)
def test_1_lots_of_files(self):
"""
create a bunch of files over the same session.
"""
sftp = get_sftp()
numfiles = 100
try:
for i in range(numfiles):
with sftp.open('%s/file%d.txt' % (FOLDER, i), 'w', 1) as f:
f.write('this is file #%d.\n' % i)
sftp.chmod('%s/file%d.txt' % (FOLDER, i), o660)
            # now make sure every file is there, by creating a list of filenames
# and reading them in random order.
numlist = list(range(numfiles))
while len(numlist) > 0:
r = numlist[random.randint(0, len(numlist) - 1)]
with sftp.open('%s/file%d.txt' % (FOLDER, r)) as f:
self.assertEqual(f.readline(), 'this is file #%d.\n' % r)
numlist.remove(r)
finally:
for i in range(numfiles):
try:
sftp.remove('%s/file%d.txt' % (FOLDER, i))
except:
pass
def test_2_big_file(self):
"""
write a 1MB file with no buffering.
"""
sftp = get_sftp()
kblob = (1024 * b'x')
start = time.time()
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'w') as f:
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
start = time.time()
with sftp.open('%s/hongry.txt' % FOLDER, 'r') as f:
for n in range(1024):
data = f.read(1024)
self.assertEqual(data, kblob)
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_3_big_file_pipelined(self):
"""
write a 1MB file, with no linefeeds, using pipelining.
"""
sftp = get_sftp()
kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
start = time.time()
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
start = time.time()
with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
f.prefetch()
# read on odd boundaries to make sure the bytes aren't getting scrambled
n = 0
k2blob = kblob + kblob
chunk = 629
size = 1024 * 1024
while n < size:
if n + chunk > size:
chunk = size - n
data = f.read(chunk)
offset = n % 1024
self.assertEqual(data, k2blob[offset:offset + chunk])
n += chunk
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_4_prefetch_seek(self):
sftp = get_sftp()
kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
start = time.time()
k2blob = kblob + kblob
chunk = 793
for i in range(10):
with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
f.prefetch()
base_offset = (512 * 1024) + 17 * random.randint(1000, 2000)
offsets = [base_offset + j * chunk for j in range(100)]
# randomly seek around and read them out
for j in range(100):
offset = offsets[random.randint(0, len(offsets) - 1)]
offsets.remove(offset)
f.seek(offset)
data = f.read(chunk)
n_offset = offset % 1024
self.assertEqual(data, k2blob[n_offset:n_offset + chunk])
offset += chunk
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_5_readv_seek(self):
sftp = get_sftp()
kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
start = time.time()
k2blob = kblob + kblob
chunk = 793
for i in range(10):
with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
base_offset = (512 * 1024) + 17 * random.randint(1000, 2000)
# make a bunch of offsets and put them in random order
offsets = [base_offset + j * chunk for j in range(100)]
readv_list = []
for j in range(100):
o = offsets[random.randint(0, len(offsets) - 1)]
offsets.remove(o)
readv_list.append((o, chunk))
ret = f.readv(readv_list)
for i in range(len(readv_list)):
offset = readv_list[i][0]
n_offset = offset % 1024
self.assertEqual(next(ret), k2blob[n_offset:n_offset + chunk])
end = time.time()
sys.stderr.write('%ds ' % round(end - start))
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_6_lots_of_prefetching(self):
"""
prefetch a 1MB file a bunch of times, discarding the file object
without using it, to verify that paramiko doesn't get confused.
"""
sftp = get_sftp()
kblob = (1024 * b'x')
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'w') as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
for i in range(10):
with sftp.open('%s/hongry.txt' % FOLDER, 'r') as f:
f.prefetch()
with sftp.open('%s/hongry.txt' % FOLDER, 'r') as f:
f.prefetch()
for n in range(1024):
data = f.read(1024)
self.assertEqual(data, kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_7_prefetch_readv(self):
"""
verify that prefetch and readv don't conflict with each other.
"""
sftp = get_sftp()
kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
f.prefetch()
data = f.read(1024)
self.assertEqual(data, kblob)
chunk_size = 793
base_offset = 512 * 1024
k2blob = kblob + kblob
chunks = [(base_offset + (chunk_size * i), chunk_size) for i in range(20)]
for data in f.readv(chunks):
offset = base_offset % 1024
self.assertEqual(chunk_size, len(data))
self.assertEqual(k2blob[offset:offset + chunk_size], data)
base_offset += chunk_size
sys.stderr.write(' ')
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_8_large_readv(self):
"""
verify that a very large readv is broken up correctly and still
returned as a single blob.
"""
sftp = get_sftp()
kblob = bytes().join([struct.pack('>H', n) for n in range(512)])
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'wb') as f:
f.set_pipelined(True)
for n in range(1024):
f.write(kblob)
if n % 128 == 0:
sys.stderr.write('.')
sys.stderr.write(' ')
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
with sftp.open('%s/hongry.txt' % FOLDER, 'rb') as f:
data = list(f.readv([(23 * 1024, 128 * 1024)]))
self.assertEqual(1, len(data))
data = data[0]
self.assertEqual(128 * 1024, len(data))
sys.stderr.write(' ')
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_9_big_file_big_buffer(self):
"""
write a 1MB file, with no linefeeds, and a big buffer.
"""
sftp = get_sftp()
mblob = (1024 * 1024 * 'x')
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'w', 128 * 1024) as f:
f.write(mblob)
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
def test_A_big_file_renegotiate(self):
"""
write a 1MB file, forcing key renegotiation in the middle.
"""
sftp = get_sftp()
t = sftp.sock.get_transport()
t.packetizer.REKEY_BYTES = 512 * 1024
k32blob = (32 * 1024 * 'x')
try:
with sftp.open('%s/hongry.txt' % FOLDER, 'w', 128 * 1024) as f:
for i in range(32):
f.write(k32blob)
self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
self.assertNotEqual(t.H, t.session_id)
# try to read it too.
with sftp.open('%s/hongry.txt' % FOLDER, 'r', 128 * 1024) as f:
f.prefetch()
total = 0
while total < 1024 * 1024:
total += len(f.read(32 * 1024))
finally:
sftp.remove('%s/hongry.txt' % FOLDER)
t.packetizer.REKEY_BYTES = pow(2, 30)
if __name__ == '__main__':
from tests.test_sftp import SFTPTest
SFTPTest.init_loopback()
from unittest import main
main()
|
kawamon/hue
|
refs/heads/master
|
desktop/core/ext-py/django-nose-1.4.5/docs/conf.py
|
6
|
# -*- coding: utf-8 -*-
"""django-nose build configuration file.
Created by sphinx-quickstart on Mon Jul 21 13:24:51 2014.
This file is execfile()d with the current directory set to its
containing dir.
Note that not all possible configuration values are present in this
autogenerated file.
All configuration values have a default; values that are commented out
serve to show the default.
"""
from __future__ import unicode_literals
from datetime import date
import sys
import os
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testapp.settings")
from django_nose import __version__ # flake8: noqa
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-nose'
copyright = (
'2010-%d, Jeff Balogh and the django-nose team.' % date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-nose-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'django-nose.tex', 'django-nose Documentation',
'Jeff Balogh and the django-nose team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-nose', 'django-nose Documentation',
['Jeff Balogh', 'the django-nose team'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-nose', 'django-nose Documentation',
'Jeff Balogh and the django-nose team', 'django-nose',
'Makes your Django tests simple and snappy')
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
|
caronc/nzb-subliminal
|
refs/heads/master
|
Subliminal/dogpile/core/__init__.py
|
60
|
from .dogpile import NeedRegenerationException, Lock
from .nameregistry import NameRegistry
from .readwrite_lock import ReadWriteMutex
from .legacy import Dogpile, SyncReaderDogpile
__all__ = [
'Dogpile', 'SyncReaderDogpile', 'NeedRegenerationException',
'NameRegistry', 'ReadWriteMutex', 'Lock']
__version__ = '0.4.1'
|
waldenner/robotframework
|
refs/heads/master
|
atest/testdata/test_libraries/ThreadLoggingLib.py
|
38
|
import threading
import logging
import time
from robot.api import logger
def log_using_robot_api_in_thread():
threading.Timer(0.1, log_using_robot_api).start()
def log_using_robot_api():
for i in range(100):
logger.info(str(i))
time.sleep(0.01)
def log_using_logging_module_in_thread():
threading.Timer(0.1, log_using_logging_module).start()
def log_using_logging_module():
for i in range(100):
logging.info(str(i))
time.sleep(0.01)
|
yajnab/android_kernel_htc_magnids
|
refs/heads/ics
|
tools/perf/scripts/python/netdev-times.py
|
11271
|
# Display the processing of packets and the time spent in each stage.
# It helps to investigate networking or network device behaviour.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
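# One typical way to run it (assuming the matching tracepoints were recorded
# into perf.data beforehand):
#   perf script -s netdev-times.py tx dev=eth0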
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint events related to this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value includes the time of NET_RX softirq-entry
                 # and a list which stacks receive events
receive_hunk_list = []; # a list which includes a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which are freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display the processing of received packets and the interrupts associated
# with a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a corresponding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
	# merge information related to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
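# Usage sketch (illustrative, not part of the original script): as a perf
# trace script this would typically be driven by something like
#   perf script -s <this-script>.py tx rx dev=eth0 debug
# where the trailing arguments (tx, rx, dev=..., debug) are the ones parsed
# by trace_begin() above.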
|
lthurlow/scaling-computing-machine
|
refs/heads/master
|
netaddr/strategy/ipv4.py
|
9
|
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2015, David P. D. Moss. All rights reserved.
#
# Released under the BSD license. See the LICENSE file for details.
#-----------------------------------------------------------------------------
"""IPv4 address logic."""
import sys as _sys
import struct as _struct
from socket import inet_aton as _inet_aton
# Check whether we need to use fallback code or not.
if _sys.platform in ('win32', 'cygwin'):
# inet_pton() not available on Windows. inet_pton() under cygwin
# behaves exactly like inet_aton() and is therefore highly unreliable.
from netaddr.fbsocket import inet_pton as _inet_pton, AF_INET
else:
# All other cases, use all functions from the socket module.
from socket import inet_pton as _inet_pton, AF_INET
from netaddr.core import AddrFormatError, ZEROFILL, INET_PTON
from netaddr.strategy import (
valid_words as _valid_words, valid_bits as _valid_bits,
bits_to_int as _bits_to_int, int_to_bits as _int_to_bits,
valid_bin as _valid_bin, int_to_bin as _int_to_bin,
bin_to_int as _bin_to_int)
from netaddr.compat import _str_type
#: The width (in bits) of this address type.
width = 32
#: The individual word size (in bits) of this address type.
word_size = 8
#: The format string to be used when converting words to string values.
word_fmt = '%d'
#: The separator character used between each word.
word_sep = '.'
#: The AF_* constant value of this address type.
family = AF_INET
#: A friendly string name address type.
family_name = 'IPv4'
#: The version of this address type.
version = 4
#: The number base to be used when interpreting word values as integers.
word_base = 10
#: The maximum integer value that can be represented by this address type.
max_int = 2 ** width - 1
#: The number of words in this address type.
num_words = width // word_size
#: The maximum integer value for an individual word in this address type.
max_word = 2 ** word_size - 1
#: A dictionary mapping IPv4 CIDR prefixes to the equivalent netmasks.
prefix_to_netmask = dict(
[(i, max_int ^ (2 ** (width - i) - 1)) for i in range(0, width + 1)])
#: A dictionary mapping IPv4 netmasks to their equivalent CIDR prefixes.
netmask_to_prefix = dict(
[(max_int ^ (2 ** (width - i) - 1), i) for i in range(0, width + 1)])
#: A dictionary mapping IPv4 CIDR prefixes to the equivalent hostmasks.
prefix_to_hostmask = dict(
[(i, (2 ** (width - i) - 1)) for i in range(0, width + 1)])
#: A dictionary mapping IPv4 hostmasks to their equivalent CIDR prefixes.
hostmask_to_prefix = dict(
[((2 ** (width - i) - 1), i) for i in range(0, width + 1)])
def valid_str(addr, flags=0):
"""
:param addr: An IPv4 address in presentation (string) format.
:param flags: decides which rules are applied to the interpretation of the
addr value. Supported constants are INET_PTON and ZEROFILL. See the
netaddr.core docs for details.
:return: ``True`` if IPv4 address is valid, ``False`` otherwise.
"""
if addr == '':
raise AddrFormatError('Empty strings are not supported!')
validity = True
if flags & ZEROFILL:
addr = '.'.join(['%d' % int(i) for i in addr.split('.')])
try:
if flags & INET_PTON:
_inet_pton(AF_INET, addr)
else:
_inet_aton(addr)
except Exception:
validity = False
return validity
def str_to_int(addr, flags=0):
"""
:param addr: An IPv4 dotted decimal address in string form.
:param flags: decides which rules are applied to the interpretation of the
addr value. Supported constants are INET_PTON and ZEROFILL. See the
netaddr.core docs for details.
:return: The equivalent unsigned integer for a given IPv4 address.
"""
if flags & ZEROFILL:
addr = '.'.join(['%d' % int(i) for i in addr.split('.')])
try:
if flags & INET_PTON:
return _struct.unpack('>I', _inet_pton(AF_INET, addr))[0]
else:
return _struct.unpack('>I', _inet_aton(addr))[0]
except Exception:
raise AddrFormatError('%r is not a valid IPv4 address string!' % addr)
def int_to_str(int_val, dialect=None):
"""
:param int_val: An unsigned integer.
:param dialect: (unused) Any value passed in is ignored.
:return: The IPv4 presentation (string) format address equivalent to the
unsigned integer provided.
"""
if 0 <= int_val <= max_int:
return '%d.%d.%d.%d' % (
int_val >> 24,
(int_val >> 16) & 0xff,
(int_val >> 8) & 0xff,
int_val & 0xff)
else:
raise ValueError('%r is not a valid 32-bit unsigned integer!' % int_val)
def int_to_arpa(int_val):
"""
:param int_val: An unsigned integer.
:return: The reverse DNS lookup for an IPv4 address in network byte
order integer form.
"""
words = ["%d" % i for i in int_to_words(int_val)]
words.reverse()
words.extend(['in-addr', 'arpa', ''])
return '.'.join(words)
def int_to_packed(int_val):
"""
:param int_val: the integer to be packed.
    :return: a packed string that is equivalent to the value represented by an
        unsigned integer.
"""
return _struct.pack('>I', int_val)
def packed_to_int(packed_int):
"""
    :param packed_int: a packed string containing an unsigned integer.
        It is assumed that the string is packed in network byte order.
    :return: An unsigned integer equivalent to the value of the network
        address represented by the packed binary string.
"""
return _struct.unpack('>I', packed_int)[0]
def valid_words(words):
return _valid_words(words, word_size, num_words)
def int_to_words(int_val):
"""
:param int_val: An unsigned integer.
    :return: An integer word (octet) sequence that is equivalent to the value
        represented by an unsigned integer.
"""
if not 0 <= int_val <= max_int:
        raise ValueError('%r is not a valid integer value supported by '
            'this address type!' % int_val)
return ( int_val >> 24,
(int_val >> 16) & 0xff,
(int_val >> 8) & 0xff,
int_val & 0xff)
def words_to_int(words):
"""
:param words: A list or tuple containing integer octets.
    :return: An unsigned integer that is equivalent to the value represented
        by the word (octet) sequence.
"""
if not valid_words(words):
raise ValueError('%r is not a valid octet list for an IPv4 address!' % words)
return _struct.unpack('>I', _struct.pack('4B', *words))[0]
def valid_bits(bits):
return _valid_bits(bits, width, word_sep)
def bits_to_int(bits):
return _bits_to_int(bits, width, word_sep)
def int_to_bits(int_val, word_sep=None):
if word_sep is None:
word_sep = globals()['word_sep']
return _int_to_bits(int_val, word_size, num_words, word_sep)
def valid_bin(bin_val):
return _valid_bin(bin_val, width)
def int_to_bin(int_val):
return _int_to_bin(int_val, width)
def bin_to_int(bin_val):
return _bin_to_int(bin_val, width)
def expand_partial_address(addr):
"""
Expands a partial IPv4 address into a full 4-octet version.
    :param addr: a partial or abbreviated IPv4 address
:return: an expanded IP address in presentation format (x.x.x.x)
"""
tokens = []
error = AddrFormatError('invalid partial IPv4 address: %r!' % addr)
if isinstance(addr, _str_type):
if ':' in addr:
# Ignore IPv6 ...
raise error
try:
if '.' in addr:
tokens = ['%d' % int(o) for o in addr.split('.')]
else:
tokens = ['%d' % int(addr)]
except ValueError:
raise error
if 1 <= len(tokens) <= 4:
for i in range(4 - len(tokens)):
tokens.append('0')
else:
raise error
if not tokens:
raise error
return '%s.%s.%s.%s' % tuple(tokens)
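# Illustrative round-trip sketch (not part of the original module); the literal
# values below are just examples of what the helpers defined above return.
if __name__ == '__main__':
    _example = '192.168.0.1'
    _as_int = str_to_int(_example)                      # 3232235521
    assert int_to_str(_as_int) == _example
    assert int_to_words(_as_int) == (192, 168, 0, 1)
    assert int_to_arpa(_as_int) == '1.0.168.192.in-addr.arpa.'
    assert expand_partial_address('10.5') == '10.5.0.0'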
|
spring-week-topos/nova-week
|
refs/heads/spring-week
|
nova/virt/netutils.py
|
16
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network-related utilities for supporting libvirt connection code."""
import os
import jinja2
import netaddr
from oslo.config import cfg
from nova.network import model
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('injected_network_template', 'nova.virt.disk.api')
def get_net_and_mask(cidr):
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net.netmask)
def get_net_and_prefixlen(cidr):
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net._prefixlen)
def get_ip_version(cidr):
net = netaddr.IPNetwork(cidr)
return int(net.version)
def _get_first_network(network, version):
    # Use a generator expression with a next() call to get the first matching
    # subnet, since we don't want to evaluate the whole list when a network
    # can have a lot of subnets.
try:
return (i for i in network['subnets']
if i['version'] == version).next()
except StopIteration:
pass
def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6,
template=CONF.injected_network_template):
"""Returns a rendered network template for the given network_info.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param use_ipv6: If False, do not return IPv6 template information
even if an IPv6 subnet is present in network_info.
:param template: Path to the interfaces template file.
"""
if not (network_info and template):
return
nets = []
ifc_num = -1
ipv6_is_available = False
for vif in network_info:
if not vif['network'] or not vif['network']['subnets']:
continue
network = vif['network']
# NOTE(bnemec): The template only supports a single subnet per
# interface and I'm not sure how/if that can be fixed, so this
# code only takes the first subnet of the appropriate type.
subnet_v4 = _get_first_network(network, 4)
subnet_v6 = _get_first_network(network, 6)
ifc_num += 1
if not network.get_meta('injected'):
continue
address = None
netmask = None
gateway = ''
broadcast = None
dns = None
if subnet_v4:
if subnet_v4.get_meta('dhcp_server') is not None:
continue
if subnet_v4['ips']:
ip = subnet_v4['ips'][0]
address = ip['address']
netmask = model.get_netmask(ip, subnet_v4)
if subnet_v4['gateway']:
gateway = subnet_v4['gateway']['address']
broadcast = str(subnet_v4.as_netaddr().broadcast)
dns = ' '.join([i['address'] for i in subnet_v4['dns']])
address_v6 = None
gateway_v6 = ''
netmask_v6 = None
have_ipv6 = (use_ipv6 and subnet_v6)
if have_ipv6:
if subnet_v6.get_meta('dhcp_server') is not None:
continue
if subnet_v6['ips']:
ipv6_is_available = True
ip_v6 = subnet_v6['ips'][0]
address_v6 = ip_v6['address']
netmask_v6 = model.get_netmask(ip_v6, subnet_v6)
if subnet_v6['gateway']:
gateway_v6 = subnet_v6['gateway']['address']
net_info = {'name': 'eth%d' % ifc_num,
'address': address,
'netmask': netmask,
'gateway': gateway,
'broadcast': broadcast,
'dns': dns,
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
}
nets.append(net_info)
if not nets:
return
return build_template(template, nets, ipv6_is_available)
def build_template(template, nets, ipv6_is_available):
tmpl_path, tmpl_file = os.path.split(CONF.injected_network_template)
env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path))
template = env.get_template(tmpl_file)
return template.render({'interfaces': nets,
'use_ipv6': ipv6_is_available})
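# Quick illustration (assumption, not part of the original module) of what the
# CIDR helpers defined above return:
#   get_net_and_mask('192.168.1.0/24')      -> ('192.168.1.0', '255.255.255.0')
#   get_net_and_prefixlen('192.168.1.0/24') -> ('192.168.1.0', '24')
#   get_ip_version('fd00::/64')             -> 6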
|
sanyaade-teachings/gyp
|
refs/heads/master
|
test/rules-rebuild/src/make-sources.py
|
337
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
assert len(sys.argv) == 4, sys.argv
(in_file, c_file, h_file) = sys.argv[1:]
def write_file(filename, contents):
open(filename, 'wb').write(contents)
write_file(c_file, open(in_file, 'rb').read())
write_file(h_file, '#define NAME "%s"\n' % in_file)
sys.exit(0)
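# Example invocation (illustrative; the file names are made up):
#   python make-sources.py input.in generated.c generated.h
# copies input.in verbatim into generated.c and writes
#   #define NAME "input.in"
# into generated.h.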
|
ramadhane/odoo
|
refs/heads/8.0
|
addons/hr_expense/tests/test_journal_entries.py
|
251
|
from openerp.tests.common import TransactionCase
from openerp import netsvc, workflow
class TestCheckJournalEntry(TransactionCase):
"""
    Check journal entries when the expense product has a tax that is included in the price.
"""
def setUp(self):
super(TestCheckJournalEntry, self).setUp()
cr, uid = self.cr, self.uid
self.expense_obj = self.registry('hr.expense.expense')
self.exp_line_obj = self.registry('hr.expense.line')
self.product_obj = self.registry('product.product')
self.tax_obj = self.registry('account.tax')
self.code_obj = self.registry('account.tax.code')
_, self.product_id = self.registry("ir.model.data").get_object_reference(cr, uid, "hr_expense", "air_ticket")
_, self.employee_id = self.registry("ir.model.data").get_object_reference(cr, uid, "hr", "employee_mit")
self.base_code_id = self.code_obj.create(cr, uid, {'name': 'Expense Base Code'})
self.tax_id = self.tax_obj.create(cr, uid, {
'name': 'Expense 10%',
'amount': 0.10,
'type': 'percent',
'type_tax_use': 'purchase',
'price_include': True,
'base_code_id': self.base_code_id,
'base_sign': -1,
})
self.product_obj.write(cr, uid, self.product_id, {'supplier_taxes_id': [(6, 0, [self.tax_id])]})
self.expense_id = self.expense_obj.create(cr, uid, {
'name': 'Expense for Minh Tran',
'employee_id': self.employee_id,
})
self.exp_line_obj.create(cr, uid, {
'name': 'Car Travel Expenses',
'product_id': self.product_id,
'unit_amount': 700.00,
'expense_id': self.expense_id
})
def test_journal_entry(self):
cr, uid = self.cr, self.uid
#Submit to Manager
workflow.trg_validate(uid, 'hr.expense.expense', self.expense_id, 'confirm', cr)
#Approve
workflow.trg_validate(uid, 'hr.expense.expense', self.expense_id, 'validate', cr)
#Create Expense Entries
workflow.trg_validate(uid, 'hr.expense.expense', self.expense_id, 'done', cr)
self.expense = self.expense_obj.browse(cr, uid, self.expense_id)
self.assertEquals(self.expense.state, 'done', 'Expense is not in Waiting Payment state')
self.assertTrue(self.expense.account_move_id.id, 'Expense Journal Entry is not created')
for line in self.expense.account_move_id.line_id:
if line.credit:
self.assertEquals(line.credit, 700.00, 'Expense Payable Amount is not matched for journal item')
else:
                if line.tax_code_id:
                    self.assertEquals(line.debit, 636.36, 'Tax Base Amount is not matched for journal item')
                else:
                    self.assertEquals(line.debit, 63.64, 'Tax Amount is not matched for journal item')
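# Worked numbers behind the assertions above (10% tax, price included):
#   expense line total            = 700.00
#   untaxed base  = 700 / 1.10    = 636.36  (journal item carrying the base tax code)
#   tax amount    = 700 - 636.36  =  63.64  (journal item without a tax code)
#   payable credit                = 700.00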
|
crichardson17/starburst_atlas
|
refs/heads/master
|
Low_resolution_sims/Dusty_LowRes/Geneva_inst_NoRot/Geneva_inst_NoRot_2/fullgrid/UV2.py
|
31
|
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for other people's data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [18, #1549
19, #1640
20, #1665
21, #1671
23, #1750
24, #1860
25, #1888
26, #1907
27, #2297
28, #2321
29, #2471
30, #2326
31, #2335
32, #2665
33, #2798
34] #2803
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty UV Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
	add_sub_plot(i+1)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_UV_Lines_cntd.pdf')
plt.clf()
print "figure saved"
|
klmitch/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/268_add_host_in_compute_node.py
|
81
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate import UniqueConstraint
from sqlalchemy import MetaData, Table, Column, String
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# Add a new column host
compute_nodes = Table('compute_nodes', meta, autoload=True)
shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
    # NOTE(sbauza) : Old compute nodes can report stats without this field, so
    # we need to make it nullable
host = Column('host', String(255), nullable=True)
if not hasattr(compute_nodes.c, 'host'):
compute_nodes.create_column(host)
if not hasattr(shadow_compute_nodes.c, 'host'):
shadow_compute_nodes.create_column(host.copy())
    # NOTE(sbauza) : Populating the host field with the value from the services
    # table will be done at the ComputeNode object level on save()
ukey = UniqueConstraint('host', 'hypervisor_hostname', table=compute_nodes,
name="uniq_compute_nodes0host0hypervisor_hostname")
ukey.create()
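# Roughly equivalent DDL for the upgrade above (illustrative; the exact
# statements depend on the database backend and on sqlalchemy-migrate):
#   ALTER TABLE compute_nodes ADD COLUMN host VARCHAR(255);
#   ALTER TABLE shadow_compute_nodes ADD COLUMN host VARCHAR(255);
#   ALTER TABLE compute_nodes ADD CONSTRAINT
#       uniq_compute_nodes0host0hypervisor_hostname UNIQUE (host, hypervisor_hostname);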
|
lehmannro/translate
|
refs/heads/master
|
storage/properties.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""classes that hold units of .properties files (propunit) or entire files
(propfile). These files are used in translating Mozilla and other software.
The following U{.properties file
description<http://java.sun.com/j2se/1.4.2/docs/api/java/util/Properties.html#load(java.io.InputStream)>}
and U{example <http://www.exampledepot.com/egs/java.util/Props.html>} give some
good references to the .properties specification.
Properties file may also hold Java
U{MessageFormat<http://java.sun.com/j2se/1.4.2/docs/api/java/text/MessageFormat.html>}
messages. No special handling is provided in this storage class for MessageFormat,
but this may be implemented in future.
Implementation
==============
A simple summary of what is permissible follows.
Comments::
# a comment
! a comment
Name and Value pairs::
# Note that the b and c are escaped for epydoc rendering
a = a string
d.e.f = another string
b = a string with escape sequences \\t \\n \\r \\\\ \\" \\' \\ (space) \u0123
c = a string with a continuation line \\
continuation line
"""
from translate.storage import base
from translate.misc import quote
from translate.lang import data
import re
# the rstripeols convert dos <-> unix nicely as well
# output will be appropriate for the platform
eol = "\n"
def find_delimiter(line):
"""Find the type and position of the delimiter in a property line.
    Property files can be delimited by "=", ":" or whitespace (space for now).
We find the position of each delimiter, then find the one that appears
first.
@param line: A properties line
@type line: str
@return: delimiter character and offset within L{line}
@rtype: Tuple (delimiter char, Offset Integer)
"""
delimiters = {"=": -1, ":": -1, " ": -1}
# Find the position of each delimiter type
for delimiter, pos in delimiters.iteritems():
prewhitespace = len(line) - len(line.lstrip())
pos = line.find(delimiter, prewhitespace)
while pos != -1:
if delimiters[delimiter] == -1 and line[pos-1] != "\\":
delimiters[delimiter] = pos
break
pos = line.find(delimiter, pos+1)
# Find the first "=" or ":" delimiter
mindelimiter = None
minpos = -1
for delimiter, pos in delimiters.iteritems():
if pos == -1 or delimiter == " ":
continue
if minpos == -1 or pos < minpos:
minpos = pos
mindelimiter = delimiter
if mindelimiter is None and delimiters[" "] != -1:
# Use space delimiter if we found nothing else
return (" ", delimiters[" "])
if mindelimiter is not None and delimiters[" "] < delimiters[mindelimiter]:
        # If the space delimiter occurs earlier than ":" or "=" then it is the
# delimiter only if there are non-whitespace characters between it and
# the other detected delimiter.
if len(line[delimiters[" "]:delimiters[mindelimiter]].strip()) > 0:
return (" ", delimiters[" "])
return (mindelimiter, minpos)
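# Illustrative behaviour of find_delimiter() (examples, not part of the
# original module):
#   find_delimiter('key = value')  -> ('=', 4)
#   find_delimiter('key value')    -> (' ', 3)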
def find_delimeter(line):
"""Spelling error that is kept around for in case someone relies on it.
Deprecated."""
raise DeprecationWarning
return find_delimiter(line)
def is_line_continuation(line):
"""Determine whether L{line} has a line continuation marker.
.properties files can be terminated with a backslash (\\) indicating
that the 'value' continues on the next line. Continuation is only
    valid if there are an odd number of backslashes (an even number
would result in a set of N/2 slashes not an escape)
@param line: A properties line
@type line: str
@return: Does L{line} end with a line continuation
@rtype: Boolean
"""
pos = -1
count = 0
if len(line) == 0:
return False
# Count the slashes from the end of the line. Ensure we don't
# go into infinite loop.
while len(line) >= -pos and line[pos:][0] == "\\":
pos -= 1
count += 1
return (count % 2) == 1 # Odd is a line continuation, even is not
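# Illustrative behaviour (examples, not part of the original module): a value
# ending in a single backslash continues on the next line, while one ending in
# two backslashes is just an escaped backslash:
#   is_line_continuation("value " + "\\")      -> True
#   is_line_continuation("value " + "\\" * 2)  -> False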
def key_strip(key):
"""Cleanup whitespace found around a key
@param key: A properties key
@type key: str
    @return: Key without any unneeded whitespace
@rtype: str
"""
newkey = key.rstrip()
    # If the key now ends in \ we put back the whitespace that was escaped
if newkey[-1:] == "\\":
newkey += key[len(newkey):len(newkey)+1]
return newkey.lstrip()
default_encoding = {"java": "latin1", "mozilla": "utf-8", "skype": "utf-16"}
class propunit(base.TranslationUnit):
"""an element of a properties file i.e. a name and value, and any comments
associated"""
def __init__(self, source="", personality="java"):
"""construct a blank propunit"""
self.personality = personality
super(propunit, self).__init__(source)
self.name = ""
self.value = u""
self.translation = u""
self.delimiter = u"="
self.comments = []
self.source = source
def setsource(self, source):
self._rich_source = None
source = data.forceunicode(source)
if self.personality == "mozilla" or self.personality == "skype":
self.value = quote.mozillapropertiesencode(source or u"")
else:
self.value = quote.javapropertiesencode(source or u"")
def getsource(self):
value = quote.propertiesdecode(self.value)
value = re.sub(u"\\\\ ", u" ", value)
return value
source = property(getsource, setsource)
def settarget(self, target):
self._rich_target = None
target = data.forceunicode(target)
if self.personality == "mozilla" or self.personality == "skype":
self.translation = quote.mozillapropertiesencode(target or u"")
else:
self.translation = quote.javapropertiesencode(target or u"")
def gettarget(self):
translation = quote.propertiesdecode(self.translation)
translation = re.sub(u"\\\\ ", u" ", translation)
return translation
target = property(gettarget, settarget)
def __str__(self):
"""convert to a string. double check that unicode is handled somehow here"""
source = self.getoutput()
if isinstance(source, unicode):
return source.encode(default_encoding[self.personality])
return source
def getoutput(self):
"""convert the element back into formatted lines for a .properties file"""
notes = self.getnotes()
if notes:
notes += u"\n"
if self.isblank():
return notes
else:
if "\\u" in self.value and self.personality == "mozilla":
self.value = quote.mozillapropertiesencode(self.source)
if "\\u" in self.translation and self.personality == "mozilla":
self.translation = quote.mozillapropertiesencode(self.target)
value = self.translation or self.value
return u"%s%s%s%s\n" % (notes, self.name, self.delimiter, value)
def getlocations(self):
return [self.name]
def addnote(self, text, origin=None, position="append"):
if origin in ['programmer', 'developer', 'source code', None]:
text = data.forceunicode(text)
self.comments.append(text)
else:
return super(propunit, self).addnote(text, origin=origin, position=position)
def getnotes(self, origin=None):
if origin in ['programmer', 'developer', 'source code', None]:
return u'\n'.join(self.comments)
else:
return super(propunit, self).getnotes(origin)
def removenotes(self):
self.comments = []
def isblank(self):
"""returns whether this is a blank element, containing only comments..."""
return not (self.name or self.value)
def istranslatable(self):
return bool(self.name)
def getid(self):
return self.name
class propfile(base.TranslationStore):
"""this class represents a .properties file, made up of propunits"""
UnitClass = propunit
def __init__(self, inputfile=None, personality="java"):
"""construct a propfile, optionally reading in from inputfile"""
super(propfile, self).__init__(unitclass = self.UnitClass)
self.filename = getattr(inputfile, 'name', '')
if inputfile is not None:
propsrc = inputfile.read()
inputfile.close()
self.parse(propsrc, personality)
def parse(self, propsrc, personality="java"):
"""read the source of a properties file in and include them as units"""
newunit = propunit("", personality)
inmultilinevalue = False
propsrc = unicode(propsrc, default_encoding[personality])
for line in propsrc.split(u"\n"):
# handle multiline value if we're in one
line = quote.rstripeol(line)
if inmultilinevalue:
newunit.value += line.lstrip()
# see if there's more
inmultilinevalue = is_line_continuation(newunit.value)
# if we're still waiting for more...
if inmultilinevalue:
# strip the backslash
newunit.value = newunit.value[:-1]
if not inmultilinevalue:
# we're finished, add it to the list...
self.addunit(newunit)
newunit = propunit("", personality)
# otherwise, this could be a comment
elif line.strip()[:1] in (u'#', u'!'):
# add a comment
newunit.comments.append(line)
elif not line.strip():
# this is a blank line...
if str(newunit).strip():
self.addunit(newunit)
newunit = propunit("", personality)
else:
delimiter_char, delimiter_pos = find_delimiter(line)
if delimiter_pos == -1:
continue
# otherwise, this is a definition
else:
newunit.delimiter = delimiter_char
newunit.name = key_strip(line[:delimiter_pos])
newunit.value = line[delimiter_pos+1:].lstrip()
# backslash at end means carry string on to next line
if is_line_continuation(newunit.value):
inmultilinevalue = True
newunit.value = newunit.value[:-1]
else:
self.addunit(newunit)
newunit = propunit("", personality)
# see if there is a leftover one...
if inmultilinevalue or len(newunit.comments) > 0:
self.addunit(newunit)
def __str__(self):
"""convert the units back to lines"""
lines = []
for unit in self.units:
lines.append(str(unit))
return "".join(lines)
|
atacai/server-tools
|
refs/heads/8.0
|
__unported__/configuration_helper/__init__.py
|
65
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: David BEAL
# Copyright 2014 Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import config # noqa
|
micha-shepher/oervoer-wizard
|
refs/heads/master
|
oervoer-django/oervoer/src/wizard/tests.py
|
3
|
'''
Created on Apr 14, 2015
@author: mshepher
'''
from django.test import TestCase
from wizard.brains.oervoer.oervoer import Oervoer
class OervoerTest(TestCase):
def testLoad(self):
oer = Oervoer(None,None,None)
oer.parse_products()
assert(len(oer.prodlists)>0)
assert(len(oer.ordlist)>0)
oer.process_order(oer.ordlist[0])
|
jumpstarter-io/neutron
|
refs/heads/master
|
neutron/plugins/nec/router_drivers.py
|
8
|
# Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Akihiro Motoki
import abc
import httplib
import six
from neutron.common import log as call_log
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.nec.common import constants as nconst
from neutron.plugins.nec.common import exceptions as nexc
LOG = logging.getLogger(__name__)
PROVIDER_OPENFLOW = nconst.ROUTER_PROVIDER_OPENFLOW
@six.add_metaclass(abc.ABCMeta)
class RouterDriverBase(object):
def __init__(self, plugin, ofc_manager):
self.plugin = plugin
self.ofc = ofc_manager
def floating_ip_support(self):
return True
@abc.abstractmethod
def create_router(self, context, tenant_id, router):
pass
@abc.abstractmethod
def update_router(self, context, router_id, old_router, new_router):
pass
@abc.abstractmethod
def delete_router(self, context, router_id, router):
pass
@abc.abstractmethod
def add_interface(self, context, router_id, port):
pass
@abc.abstractmethod
def delete_interface(self, context, router_id, port):
pass
class RouterL3AgentDriver(RouterDriverBase):
need_gw_info = False
@call_log.log
def create_router(self, context, tenant_id, router):
return router
@call_log.log
def update_router(self, context, router_id, old_router, new_router):
return new_router
@call_log.log
def delete_router(self, context, router_id, router):
pass
@call_log.log
def add_interface(self, context, router_id, port):
return self.plugin.activate_port_if_ready(context, port)
@call_log.log
def delete_interface(self, context, router_id, port):
return self.plugin.deactivate_port(context, port)
class RouterOpenFlowDriver(RouterDriverBase):
need_gw_info = True
def floating_ip_support(self):
return self.ofc.driver.router_nat_supported
def _process_gw_port(self, gw_info, routes):
if gw_info and gw_info['gateway_ip']:
routes.append({'destination': '0.0.0.0/0',
'nexthop': gw_info['gateway_ip']})
@call_log.log
def create_router(self, context, tenant_id, router):
try:
router_id = router['id']
added_routes = []
self.ofc.ensure_ofc_tenant(context, tenant_id)
self.ofc.create_ofc_router(context, tenant_id, router_id,
router['name'])
self._process_gw_port(router['gw_port'], added_routes)
if added_routes:
self.ofc.update_ofc_router_route(context, router_id,
added_routes, [])
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(context, "router",
router['id'],
new_status)
router['status'] = new_status
return router
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
if (isinstance(exc, nexc.OFCException) and
exc.status == httplib.CONFLICT):
raise nexc.RouterOverLimit(provider=PROVIDER_OPENFLOW)
reason = _("create_router() failed due to %s") % exc
LOG.error(reason)
new_status = nconst.ROUTER_STATUS_ERROR
                self.plugin._update_resource_status(context, "router",
                                                    router['id'],
                                                    new_status)
@call_log.log
def update_router(self, context, router_id, old_router, new_router):
old_routes = old_router['routes'][:]
new_routes = new_router['routes'][:]
self._process_gw_port(old_router['gw_port'], old_routes)
self._process_gw_port(new_router['gw_port'], new_routes)
added, removed = utils.diff_list_of_dict(old_routes, new_routes)
if added or removed:
try:
                # NOTE(amotoki): PFC only supports one-by-one route updates for
                # now, so some routes may end up updated while others are not.
                # To let a later call resync the routes with the Neutron side
                # after such a failure, we pass the whole set of new routes
                # here. PFC should support atomic route updates in the future.
self.ofc.update_ofc_router_route(context, router_id,
new_routes)
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(
context, "router", router_id, new_status)
new_router['status'] = new_status
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
reason = _("_update_ofc_routes() failed due to %s") % exc
LOG.error(reason)
new_status = nconst.ROUTER_STATUS_ERROR
self.plugin._update_resource_status(
context, "router", router_id, new_status)
return new_router
@call_log.log
def delete_router(self, context, router_id, router):
if not self.ofc.exists_ofc_router(context, router_id):
return
try:
self.ofc.delete_ofc_router(context, router_id, router)
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
LOG.error(_("delete_router() failed due to %s"), exc)
self.plugin._update_resource_status(
context, "router", router_id, nconst.ROUTER_STATUS_ERROR)
@call_log.log
def add_interface(self, context, router_id, port):
port_id = port['id']
# port['fixed_ips'] may be empty if ext_net has no subnet.
        # Such a port is invalid as a router port, so we don't create a port
        # on OFC. The port is removed in l3_db._create_router_gw_port.
if not port['fixed_ips']:
msg = _('RouterOpenFlowDriver.add_interface(): the requested port '
'has no subnet. add_interface() is skipped. '
'router_id=%(id)s, port=%(port)s)')
LOG.warning(msg, {'id': router_id, 'port': port})
return port
fixed_ip = port['fixed_ips'][0]
subnet = self.plugin._get_subnet(context, fixed_ip['subnet_id'])
port_info = {'network_id': port['network_id'],
'ip_address': fixed_ip['ip_address'],
'cidr': subnet['cidr'],
'mac_address': port['mac_address']}
try:
self.ofc.add_ofc_router_interface(context, router_id,
port_id, port_info)
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(
context, "port", port_id, new_status)
return port
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
reason = _("add_router_interface() failed due to %s") % exc
LOG.error(reason)
new_status = nconst.ROUTER_STATUS_ERROR
self.plugin._update_resource_status(
context, "port", port_id, new_status)
@call_log.log
def delete_interface(self, context, router_id, port):
port_id = port['id']
try:
self.ofc.delete_ofc_router_interface(context, router_id, port_id)
new_status = nconst.ROUTER_STATUS_ACTIVE
self.plugin._update_resource_status(context, "port", port_id,
new_status)
port['status'] = new_status
return port
except (nexc.OFCException, nexc.OFCMappingNotFound) as exc:
with excutils.save_and_reraise_exception():
reason = _("delete_router_interface() failed due to %s") % exc
LOG.error(reason)
new_status = nconst.ROUTER_STATUS_ERROR
self.plugin._update_resource_status(context, "port", port_id,
new_status)
|
notnola/pyalienfx
|
refs/heads/master
|
usb/_debug.py
|
10
|
# Copyright (C) 2009-2011 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
__author__ = 'Wander Lairson Costa'
__all__ = ['methodtrace', 'functiontrace']
import logging
import usb._interop as _interop
def _trace_function_call(logger, fname, *args, **named_args):
logger.debug(
# TODO: check if 'f' is a method or a free function
fname + '(' + \
', '.join((str(val) for val in args)) + \
', '.join((name + '=' + str(val) for name, val in named_args.items())) + ')'
)
# decorator for tracing method calls
def methodtrace(logger):
def decorator_logging(f):
def do_trace(*args, **named_args):
            # this check is just an optimization to avoid unnecessary string formatting
if logging.DEBUG >= logger.getEffectiveLevel():
fn = type(args[0]).__name__ + '.' + f.__name__
_trace_function_call(logger, fn, *args[1:], **named_args)
return f(*args, **named_args)
_interop._update_wrapper(do_trace, f)
return do_trace
return decorator_logging
# decorator for tracing plain function calls
def functiontrace(logger):
def decorator_logging(f):
def do_trace(*args, **named_args):
            # this check is just an optimization to avoid unnecessary string formatting
if logging.DEBUG >= logger.getEffectiveLevel():
_trace_function_call(logger, f.__name__, *args, **named_args)
return f(*args, **named_args)
_interop._update_wrapper(do_trace, f)
return do_trace
return decorator_logging
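# Illustrative use of the decorators above (assumption, not part of the module):
#
#   import logging
#   _logger = logging.getLogger('usb')
#
#   @functiontrace(_logger)
#   def claim_interface(dev, intf):
#       ...
#
# With the logger's level set to DEBUG, each call is logged as
# "claim_interface(<args>)" before the wrapped function runs.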
|
mosterta/xbmc
|
refs/heads/master
|
lib/libUPnP/Neptune/Extras/Tools/Logging/NeptuneLogConsoleMulticast.py
|
202
|
#!/usr/bin/env python
from struct import *
from socket import *
from optparse import OptionParser
UDP_ADDR = "0.0.0.0"
UDP_MULTICAST_ADDR = "239.255.255.100"
UDP_PORT = 7724
BUFFER_SIZE = 65536
#HEADER_KEYS = ['Logger', 'Level', 'Source-File', 'Source-Function', 'Source-Line', 'TimeStamp']
HEADER_KEYS = {
    'mini': ('Level',),
'standard': ('Logger', 'Level', 'Source-Function'),
'long': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function'),
'all': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function', 'TimeStamp'),
'custom': ()
}
Senders = {}
class LogRecord:
def __init__(self, data):
offset = 0
self.headers = {}
for line in data.split("\r\n"):
offset += len(line)+2
if ':' not in line: break
key,value=line.split(":",1)
self.headers[key] = value.strip()
self.body = data[offset:]
def __getitem__(self, index):
return self.headers[index]
def format(self, sender_index, keys):
parts = ['['+str(sender_index)+']']
if 'Level' in keys:
parts.append('['+self.headers['Level']+']')
if 'Logger' in keys:
parts.append(self.headers['Logger'])
if 'TimeStamp' in keys:
parts.append(self.headers['TimeStamp'])
if 'Source-File' in keys:
if 'Source-Line' in keys:
parts.append(self.headers['Source-File']+':'+self.headers['Source-Line'])
else:
parts.append(self.headers['Source-File'])
if 'TimeStamp' in keys:
parts.append(self.headers['TimeStamp'])
if 'Source-Function' in keys:
parts.append(self.headers['Source-Function'])
parts.append(self.body)
return ' '.join(parts)
class Listener:
def __init__(self, format='standard', port=UDP_PORT):
self.socket = socket(AF_INET,SOCK_DGRAM)
self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
mreq = pack("4sl", inet_aton(UDP_MULTICAST_ADDR), INADDR_ANY)
self.socket.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq)
self.socket.bind((UDP_ADDR, port))
self.format_keys = HEADER_KEYS[format]
def listen(self):
while True:
data,addr = self.socket.recvfrom(BUFFER_SIZE)
sender_index = len(Senders.keys())
if addr in Senders:
sender_index = Senders[addr]
else:
print "### NEW SENDER:", addr
Senders[addr] = sender_index
record = LogRecord(data)
print record.format(sender_index, self.format_keys)
### main
parser = OptionParser(usage="%prog [options]")
parser.add_option("-p", "--port", dest="port", help="port number to listen on", type="int", default=UDP_PORT)
parser.add_option("-f", "--format", dest="format", help="log format (mini, standard, long, or all)", choices=('mini', 'standard', 'long', 'all'), default='standard')
(options, args) = parser.parse_args()
print "Listening on port", options.port
l = Listener(format=options.format, port=options.port)
l.listen()
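# Example invocation (illustrative): listen on the default multicast port using
# the long output format:
#   python NeptuneLogConsoleMulticast.py --port 7724 --format long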
|
jhartford/DeepIV
|
refs/heads/master
|
deepiv/models.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import warnings
import deepiv.samplers as samplers
import deepiv.densities as densities
from deepiv.custom_gradients import replace_gradients_mse
from keras.models import Model
from keras import backend as K
from keras.layers import Lambda, InputLayer
from keras.engine import topology
try:
import h5py
except ImportError:
h5py = None
import keras.utils
import numpy
from sklearn import linear_model
from sklearn.decomposition import PCA
from scipy.stats import norm
class Treatment(Model):
'''
Adds sampling functionality to a Keras model and extends the losses to support
mixture of gaussian losses.
# Argument
'''
def _get_sampler_by_string(self, loss):
output = self.outputs[0]
inputs = self.inputs
if loss in ["MSE", "mse", "mean_squared_error"]:
output += samplers.random_normal(K.shape(output), mean=0.0, std=1.0)
draw_sample = K.function(inputs + [K.learning_phase()], [output])
def sample_gaussian(inputs, use_dropout=False):
'''
Helper to draw samples from a gaussian distribution
'''
return draw_sample(inputs + [int(use_dropout)])[0]
return sample_gaussian
elif loss == "binary_crossentropy":
output = K.random_binomial(K.shape(output), p=output)
draw_sample = K.function(inputs + [K.learning_phase()], [output])
def sample_binomial(inputs, use_dropout=False):
'''
Helper to draw samples from a binomial distribution
'''
return draw_sample(inputs + [int(use_dropout)])[0]
return sample_binomial
elif loss in ["mean_absolute_error", "mae", "MAE"]:
output += samplers.random_laplace(K.shape(output), mu=0.0, b=1.0)
draw_sample = K.function(inputs + [K.learning_phase()], [output])
def sample_laplace(inputs, use_dropout=False):
'''
Helper to draw samples from a Laplacian distribution
'''
return draw_sample(inputs + [int(use_dropout)])[0]
return sample_laplace
elif loss == "mixture_of_gaussians":
pi, mu, log_sig = densities.split_mixture_of_gaussians(output, self.n_components)
samples = samplers.random_gmm(pi, mu, K.exp(log_sig))
draw_sample = K.function(inputs + [K.learning_phase()], [samples])
return lambda inputs, use_dropout: draw_sample(inputs + [int(use_dropout)])[0]
else:
raise NotImplementedError("Unrecognised loss: %s.\
Cannot build a generic sampler" % loss)
def _prepare_sampler(self, loss):
'''
Build sampler
'''
if isinstance(loss, str):
self._sampler = self._get_sampler_by_string(loss)
else:
warnings.warn("You're using a custom loss function. Make sure you implement\
the model's sample() fuction yourself.")
def compile(self, optimizer, loss, metrics=None, loss_weights=None,
sample_weight_mode=None, n_components=None, **kwargs):
        '''
        Overrides the Keras compile function to add a sampler-building step to
        the model compilation phase and to add support for the mixture of
        gaussians loss. Once compiled, samples can be drawn from the network
        using the sample() function.
        '''
if loss == "mixture_of_gaussians":
if n_components is None:
raise Exception("When using mixture of gaussian loss you must\
supply n_components argument")
self.n_components = n_components
self._prepare_sampler(loss)
loss = lambda y_true, y_pred: densities.mixture_of_gaussian_loss(y_true,
y_pred,
n_components)
def predict_mean(x, batch_size=32, verbose=0):
'''
Helper to just predict the expected value of the mixture
of gaussian rather than the parameters for the distribution.
'''
y_hat = super(Treatment, self).predict(x, batch_size, verbose)
n_c = n_components
return (y_hat[:, 0:n_c] * y_hat[:, n_c:2*n_c]).sum(axis=1, keepdims=True)
self.predict_mean = predict_mean
else:
self._prepare_sampler(loss)
super(Treatment, self).compile(optimizer, loss, metrics=metrics, loss_weights=loss_weights,
sample_weight_mode=sample_weight_mode, **kwargs)
def sample(self, inputs, n_samples=1, use_dropout=False):
'''
Draw samples from the keras model.
'''
if hasattr(self, "_sampler"):
if not isinstance(inputs, list):
inputs = [inputs]
inputs = [i.repeat(n_samples, axis=0) for i in inputs]
return self._sampler(inputs, use_dropout)
else:
raise Exception("Compile model with loss before sampling")
class Response(Model):
'''
Extends the Keras Model class to support sampling from the Treatment
model during training.
Overwrites the existing fit_generator function.
# Arguments
In addition to the standard model arguments, a Response object takes
a Treatment object as input so that it can sample from the fitted treatment
    distribution during training.
'''
def __init__(self, treatment, **kwargs):
if isinstance(treatment, Treatment):
self.treatment = treatment
else:
raise TypeError("Expected a treatment model of type Treatment. \
Got a model of type %s. Remember to train your\
treatment model first." % type(treatment))
super(Response, self).__init__(**kwargs)
def compile(self, optimizer, loss, metrics=None, loss_weights=None, sample_weight_mode=None,
unbiased_gradient=False,n_samples=1, batch_size=None):
super(Response, self).compile(optimizer=optimizer, loss=loss, loss_weights=loss_weights,
sample_weight_mode=sample_weight_mode)
self.unbiased_gradient = unbiased_gradient
if unbiased_gradient:
if loss in ["MSE", "mse", "mean_squared_error"]:
if batch_size is None:
raise ValueError("Must supply a batch_size argument if using unbiased gradients. Currently batch_size is None.")
replace_gradients_mse(self, optimizer, batch_size=batch_size, n_samples=n_samples)
else:
warnings.warn("Unbiased gradient only implemented for mean square error loss. It is unnecessary for\
logistic losses and currently not implemented for absolute error losses.")
def fit(self, x=None, y=None, batch_size=512, epochs=1, verbose=1, callbacks=None,
validation_data=None, class_weight=None, initial_epoch=0, samples_per_batch=None,
seed=None, observed_treatments=None):
'''
        Trains the model by sampling from the fitted treatment distribution.
# Arguments
x: list of numpy arrays. The first element should *always* be the instrument variables.
y: (numpy array). Target response variables.
The remainder of the arguments correspond to the Keras definitions.
'''
batch_size = numpy.minimum(y.shape[0], batch_size)
if seed is None:
seed = numpy.random.randint(0, 1e6)
if samples_per_batch is None:
if self.unbiased_gradient:
samples_per_batch = 2
else:
samples_per_batch = 1
if observed_treatments is None:
generator = SampledSequence(x[1:], x[0], y, batch_size, self.treatment.sample, samples_per_batch)
else:
generator = OnesidedUnbaised(x[1:], x[0], y, observed_treatments, batch_size,
self.treatment.sample, samples_per_batch)
steps_per_epoch = y.shape[0] // batch_size
super(Response, self).fit_generator(generator=generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs, verbose=verbose,
callbacks=callbacks, validation_data=validation_data,
class_weight=class_weight, initial_epoch=initial_epoch)
def fit_generator(self, **kwargs):
'''
        We override fit_generator to support sampling from the treatment model during training.
If you need this functionality, you'll need to build a generator that samples from the
treatment and performs whatever transformations you're performing. Please submit a pull
request if you implement this.
'''
raise NotImplementedError("We use override fit_generator to support sampling from the\
treatment model during training.")
def expected_representation(self, x, z, n_samples=100, batch_size=None, seed=None):
inputs = [z, x]
if not hasattr(self, "_E_representation"):
if batch_size is None:
batch_size = inputs[0].shape[0]
steps = 1
else:
steps = inputs[0].shape[0] // batch_size
intermediate_layer_model = Model(inputs=self.inputs,
outputs=self.layers[-2].output)
def pred(inputs, n_samples=100, seed=None):
features = inputs[1]
samples = self.treatment.sample(inputs, n_samples)
batch_features = [features.repeat(n_samples, axis=0)] + [samples]
representation = intermediate_layer_model.predict(batch_features)
return representation.reshape((inputs[0].shape[0], n_samples, -1)).mean(axis=1)
self._E_representation = pred
return self._E_representation(inputs, n_samples, seed)
else:
return self._E_representation(inputs, n_samples, seed)
def conditional_representation(self, x, p):
inputs = [x, p]
if not hasattr(self, "_c_representation"):
intermediate_layer_model = Model(inputs=self.inputs,
outputs=self.layers[-2].output)
self._c_representation = intermediate_layer_model.predict
return self._c_representation(inputs)
else:
return self._c_representation(inputs)
def dropout_predict(self, x, z, n_samples=100):
if isinstance(x, list):
inputs = [z] + x
else:
inputs = [z, x]
if not hasattr(self, "_dropout_predict"):
predict_with_dropout = K.function(self.inputs + [K.learning_phase()],
[self.layers[-1].output])
def pred(inputs, n_samples = 100):
# draw samples from the treatment network with dropout turned on
samples = self.treatment.sample(inputs, n_samples, use_dropout=True)
# prepare inputs for the response network
rep_inputs = [i.repeat(n_samples, axis=0) for i in inputs[1:]] + [samples]
                # return outputs from the response network with dropout turned on (learning_phase=1)
return predict_with_dropout(rep_inputs + [1])[0]
self._dropout_predict = pred
return self._dropout_predict(inputs, n_samples)
else:
return self._dropout_predict(inputs, n_samples)
def credible_interval(self, x, z, n_samples=100, p=0.95):
'''
Return a credible interval of size p using dropout variational inference.
'''
if isinstance(x, list):
n = x[0].shape[0]
else:
n = x.shape[0]
alpha = (1-p) / 2.
samples = self.dropout_predict(x, z, n_samples).reshape((n, n_samples, -1))
upper = numpy.percentile(samples.copy(), 100*(p+alpha), axis=1)
lower = numpy.percentile(samples.copy(), 100*(alpha), axis=1)
return lower, upper
def _add_constant(self, X):
return numpy.concatenate((numpy.ones((X.shape[0], 1)), X), axis=1)
def predict_confidence(self, x, p):
if hasattr(self, "_predict_confidence"):
return self._predict_confidence(x, p)
else:
raise Exception("Call fit_confidence_interval before running predict_confidence")
def fit_confidence_interval(self, x_lo, z_lo, p_lo, y_lo, n_samples=100, alpha=0.):
eta_bar = self.expected_representation(x=x_lo, z=z_lo, n_samples=n_samples)
pca = PCA(1-1e-16, svd_solver="full", whiten=True)
pca.fit(eta_bar)
eta_bar = pca.transform(eta_bar)
eta_lo_prime = pca.transform(self.conditional_representation(x_lo, p_lo))
eta_lo = self._add_constant(eta_lo_prime)
ols1 = linear_model.Ridge(alpha=alpha, fit_intercept=True)
ols1.fit(eta_bar, eta_lo_prime)
hhat = ols1.predict(eta_bar)
ols2 = linear_model.Ridge(alpha=alpha, fit_intercept=False)
ols2.fit(self._add_constant(hhat), y_lo)
yhat = ols2.predict(eta_lo)
hhi = numpy.linalg.inv(numpy.dot(eta_lo.T, eta_lo))
heh = numpy.dot(eta_lo.T, numpy.square(y_lo - yhat) * eta_lo)
V = numpy.dot(numpy.dot(hhi, heh), hhi)
def pred(xx, pp):
H = self._add_constant(pca.transform(self.conditional_representation(xx,pp)))
sdhb = numpy.sqrt(numpy.diag(numpy.dot(numpy.dot(H, V), H.T)))
hb = ols2.predict(H).flatten()
return hb, sdhb
self._predict_confidence = pred
class SampledSequence(keras.utils.Sequence):
def __init__(self, features, instruments, outputs, batch_size, sampler, n_samples=1, seed=None):
self.rng = numpy.random.RandomState(seed)
if not isinstance(features, list):
features = [features.copy()]
else:
features = [f.copy() for f in features]
self.features = features
self.instruments = instruments.copy()
self.outputs = outputs.copy()
if batch_size < self.instruments.shape[0]:
self.batch_size = batch_size
else:
self.batch_size = self.instruments.shape[0]
self.sampler = sampler
self.n_samples = n_samples
self.current_index = 0
self.shuffle()
def __len__(self):
if isinstance(self.outputs, list):
return self.outputs[0].shape[0] // self.batch_size
else:
return self.outputs.shape[0] // self.batch_size
def shuffle(self):
idx = self.rng.permutation(numpy.arange(self.instruments.shape[0]))
self.instruments = self.instruments[idx,:]
self.outputs = self.outputs[idx,:]
self.features = [f[idx,:] for f in self.features]
def __getitem__(self,idx):
instruments = [self.instruments[idx*self.batch_size:(idx+1)*self.batch_size, :]]
features = [inp[idx*self.batch_size:(idx+1)*self.batch_size, :] for inp in self.features]
sampler_input = instruments + features
samples = self.sampler(sampler_input, self.n_samples)
batch_features = [f[idx*self.batch_size:(idx+1)*self.batch_size].repeat(self.n_samples, axis=0) for f in self.features] + [samples]
batch_y = self.outputs[idx*self.batch_size:(idx+1)*self.batch_size].repeat(self.n_samples, axis=0)
if idx == (len(self) - 1):
self.shuffle()
return batch_features, batch_y
class OnesidedUnbaised(SampledSequence):
def __init__(self, features, instruments, outputs, treatments, batch_size, sampler, n_samples=1, seed=None):
self.rng = numpy.random.RandomState(seed)
if not isinstance(features, list):
features = [features.copy()]
else:
features = [f.copy() for f in features]
self.features = features
self.instruments = instruments.copy()
self.outputs = outputs.copy()
self.treatments = treatments.copy()
self.batch_size = batch_size
self.sampler = sampler
self.n_samples = n_samples
self.current_index = 0
self.shuffle()
def shuffle(self):
idx = self.rng.permutation(numpy.arange(self.instruments.shape[0]))
self.instruments = self.instruments[idx,:]
self.outputs = self.outputs[idx,:]
self.features = [f[idx,:] for f in self.features]
self.treatments = self.treatments[idx,:]
def __getitem__(self, idx):
instruments = [self.instruments[idx*self.batch_size:(idx+1)*self.batch_size, :]]
features = [inp[idx*self.batch_size:(idx+1)*self.batch_size, :] for inp in self.features]
observed_treatments = self.treatments[idx*self.batch_size:(idx+1)*self.batch_size, :]
sampler_input = instruments + features
samples = self.sampler(sampler_input, self.n_samples // 2)
samples = numpy.concatenate([observed_treatments, samples], axis=0)
batch_features = [f[idx*self.batch_size:(idx+1)*self.batch_size].repeat(self.n_samples, axis=0) for f in self.features] + [samples]
batch_y = self.outputs[idx*self.batch_size:(idx+1)*self.batch_size].repeat(self.n_samples, axis=0)
if idx == (len(self) - 1):
self.shuffle()
return batch_features, batch_y
def load_weights(filepath, model):
if h5py is None:
raise ImportError('`load_weights` requires h5py.')
with h5py.File(filepath, mode='r') as f:
# set weights
topology.load_weights_from_hdf5_group(f['model_weights'], model.layers)
return model
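# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It shows, under
# assumed shapes and synthetic data, how the two-stage setup described in the
# docstrings above fits together: compile and fit a Treatment model on
# (instruments, features), then hand it to a Response model that samples from
# it during training. Layer sizes, array shapes and the random data below are
# illustrative assumptions only, not values from the original project.
if __name__ == "__main__":
    from keras.layers import Input, Dense, Concatenate

    rng = numpy.random.RandomState(0)
    n = 256
    z = rng.normal(size=(n, 3))   # instruments
    x = rng.normal(size=(n, 5))   # observed features
    t = rng.normal(size=(n, 1))   # treatment
    y = rng.normal(size=(n, 1))   # response

    # Treatment network p(t | z, x). Inputs must be [instruments, features],
    # matching the order SampledSequence passes to Treatment.sample().
    inst_in = Input(shape=(3,))
    feat_in = Input(shape=(5,))
    h = Dense(32, activation="relu")(Concatenate()([inst_in, feat_in]))
    treatment = Treatment(inputs=[inst_in, feat_in], outputs=Dense(1)(h))
    treatment.compile("adam", loss="mse")
    treatment.fit([z, x], t, epochs=1, verbose=0)

    # Response network E[y | x, t]. Inputs are [features, sampled treatment].
    feat_in2 = Input(shape=(5,))
    treat_in = Input(shape=(1,))
    h2 = Dense(32, activation="relu")(Concatenate()([feat_in2, treat_in]))
    response = Response(treatment=treatment,
                        inputs=[feat_in2, treat_in], outputs=Dense(1)(h2))
    response.compile("adam", loss="mse")
    # For Response.fit, the first element of x is the instruments, the rest
    # are the observed features.
    response.fit([z, x], y, epochs=1, batch_size=64, verbose=0)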
|
jluissandovalm/smd_lammps
|
refs/heads/luis
|
tools/amber2lmp/dump2trj.py
|
52
|
#! /usr/freeware/bin/python
#
# This is dump2trj, a program written by Keir E. Novik to convert
# Lammps position dump files to Amber trajectory files.
#
# Copyright 2000, 2001 Keir E. Novik; all rights reserved.
#
# Modified by Vikas Varshney, U Akron, 5 July 2005, as described in README
#
#============================================================
def Convert_files():
'Handle the whole conversion process'
print
print 'Welcome to dump2trj, a program to convert Lammps position dump files to\nAmber trajectory format!'
print
Basename_list = Find_dump_files()
for Basename in Basename_list:
t = Trajectory()
if t.Read_dump(Basename):
t.Write_trj(Basename)
del t
print
#============================================================
def Find_dump_files():
'Look for sets of Lammps position dump files to process'
'''If passed something on the command line, treat it as a list of
files to process. Otherwise, look for *.dump in the current
directory.
'''
import os, sys
Basename_list = []
# Extract basenames from command line
for Name in sys.argv[1:]:
if Name[-5:] == '.dump':
Basename_list.append(Name[:-5])
else:
Basename_list.append(Name)
if Basename_list == []:
print 'Looking for Lammps dump files...',
Dir_list = os.listdir('.')
for Filename in Dir_list:
if Filename[-5:] == '.dump':
Basename_list.append(Filename[:-5])
Basename_list.sort()
if Basename_list != []:
print 'found',
for i in range(len(Basename_list)-1):
print Basename_list[i] + ',',
print Basename_list[-1] + '\n'
if Basename_list == []:
print 'none.\n'
return Basename_list
#============================================================
class Snapshot:
def __init__(self, The_trajectory):
'Initialise the Snapshot class'
self.timestep = The_trajectory.timestep
self.atoms = The_trajectory.atoms
self.xlo = The_trajectory.xlo
self.xhi = The_trajectory.xhi
self.ylo = The_trajectory.ylo
self.yhi = The_trajectory.yhi
self.zlo = The_trajectory.zlo
self.zhi = The_trajectory.zhi
#--------------------------------------------------------
def Read_dump(self, Lines):
'Read a snapshot (timestep) from a Lammps position dump file'
'''Trajectory.Read_dump() will pass us only the lines we need.
'''
self.Atom_list = Lines
#--------------------------------------------------------
def Write_trj(self, F):
'Write a snapshot (timestep) to an Amber trajectory file'
'''The Atom_list must be sorted, as it may not be in order
(for example, in a parallel Lammps simulation).
'''
import string
xBOX = (self.xhi - self.xlo)
yBOX = (self.yhi - self.ylo)
zBOX = (self.zhi - self.zlo)
Min = min(self.xlo, self.ylo, self.zlo)
Max = max(self.xhi, self.yhi, self.zhi, xBOX, yBOX, zBOX)
if Min <= -1000 or Max >= 10000:
print '(error: coordinates too large!)'
return
Print_list = []
for Line in NumericalSort(self.Atom_list):
Item_list = string.split(Line)
x = xBOX * (Float(Item_list[2])+Float(Item_list[5])) # Modified main box x-coordinate to actual x-coordinate
y = yBOX * (Float(Item_list[3])+Float(Item_list[6])) # Modified main box y-coordinate to actual y-coordinate
z = zBOX * (Float(Item_list[4])+Float(Item_list[7])) # Modified main box z-coordinate to actual z-coordinate
Print_list.append('%(x)8.3f' % vars())
Print_list.append('%(y)8.3f' % vars())
Print_list.append('%(z)8.3f' % vars())
if len(Print_list) > 9:
Line = ''
for j in range(10):
Line = Line + Print_list[j]
Line = Line + '\n'
Print_list = Print_list[10:]
try:
F.write(Line)
except IOError, Detail:
print '(error:', Detail[1] + '!)'
F.close()
return
if len(Print_list) > 0:
Line = ''
for j in range(len(Print_list)):
Line = Line + Print_list[j]
Line = Line + '\n'
try:
F.write(Line)
except IOError, Detail:
print '(error:', Detail[1] + '!)'
F.close()
return
Line = '%(xBOX)8.3f%(yBOX)8.3f%(zBOX)8.3f\n' % vars()
try:
F.write(Line)
except IOError, Detail:
print '(error:', Detail[1] + '!)'
F.close()
return
#============================================================
class Trajectory:
def Read_dump(self, Basename):
'Read a Lammps position dump file'
import string, sys
Filename = Basename + '.dump'
print 'Reading', Filename + '...',
sys.stdout.flush()
try:
F = open(Filename)
except IOError, Detail:
print '(error:', Detail[1] + '!)'
return 0
try:
Lines = F.readlines()
except IOError, Detail:
print '(error:', Detail[1] + '!)'
F.close()
return 0
F.close()
i = 0
self.Snapshot_list = []
# Parse the dump
while i < len(Lines):
if string.find(Lines[i], 'ITEM: TIMESTEP') != -1:
# Read the timestep
self.timestep = int(Lines[i+1])
i = i + 2
elif string.find(Lines[i], 'ITEM: NUMBER OF ATOMS') != -1:
# Read the number of atoms
self.atoms = int(Lines[i+1])
i = i + 2
elif string.find(Lines[i], 'ITEM: BOX BOUNDS') != -1:
# Read the periodic box boundaries
Item_list = string.split(Lines[i+1])
self.xlo = Float(Item_list[0])
self.xhi = Float(Item_list[1])
Item_list = string.split(Lines[i+2])
self.ylo = Float(Item_list[0])
self.yhi = Float(Item_list[1])
Item_list = string.split(Lines[i+3])
self.zlo = Float(Item_list[0])
self.zhi = Float(Item_list[1])
i = i + 4
elif string.find(Lines[i], 'ITEM: ATOMS') != -1:
# Read atom positions
self.Snapshot_list.append(Snapshot(self))
Start = i + 1
End = Start + self.atoms
self.Snapshot_list[-1].Read_dump(Lines[Start:End])
i = i + self.atoms + 1
else:
print '(error: unknown line in file!)'
return
print 'done.'
return 1
#--------------------------------------------------------
def Write_trj(self, Basename):
'Write an Amber trajectory file'
import os, sys
Filename = Basename + '.mdcrd'
Dir_list = os.listdir('.')
i = 1
while Filename in Dir_list:
Filename = Basename + `i` + '.mdcrd'
i = i + 1
del i
print 'Writing', Filename + '...',
sys.stdout.flush()
try:
F = open(Filename, 'w')
except IOError, Detail:
print '(error:', Detail[1] + '!)'
return
try:
F.write(Basename + '\n')
except IOError, Detail:
print '(error:', Detail[1] + '!)'
F.close()
return
for S in self.Snapshot_list:
S.Write_trj(F)
F.close()
print 'done.'
#============================================================
def Float(s):
'Return the string s as a float, if possible'
try:
x = float(s)
except ValueError:
if s[-1] == ',':
s = s[:-1]
x = float(s)
return x
#============================================================
def NumericalSort(String_list):
'Sort a list of strings by the integer value of the first element'
import string
Working_list = []
for s in String_list:
Working_list.append((int(string.split(s)[0]), s))
Working_list.sort()
Return_list = []
for Tuple in Working_list:
Return_list.append(Tuple[1])
return Return_list
#============================================================
Convert_files()
|
liorvh/infernal-twin
|
refs/heads/master
|
build/reportlab/build/lib.linux-i686-2.7/reportlab/lib/geomutils.py
|
35
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__=''' $Id$ '''
__doc__='''Utility functions for geometrical operations.'''
def normalizeTRBL(p):
'''
Useful for interpreting short descriptions of paddings, borders, margin, etc.
Expects a single value or a tuple of length 2 to 4.
Returns a tuple representing (clockwise) the value(s) applied to the 4 sides of a rectangle:
If a single value is given, that value is applied to all four sides.
If two or three values are given, the missing values are taken from the opposite side(s).
If four values are given they are returned unchanged.
>>> normalizeTRBL(1)
(1, 1, 1, 1)
>>> normalizeTRBL((1, 1.2))
(1, 1.2, 1, 1.2)
>>> normalizeTRBL((1, 1.2, 0))
(1, 1.2, 0, 1.2)
>>> normalizeTRBL((1, 1.2, 0, 8))
(1, 1.2, 0, 8)
'''
if not isinstance(p, (tuple, list)):
return (p,)*4
l = len(p)
if l < 2 or l > 4:
raise ValueError('A padding must have between 2 and 4 values but got %d.' % l)
return tuple(p) + tuple([ p[i-2] for i in range(l, 4) ])
|
Alerion/fantasy_map
|
refs/heads/master
|
fantasy_map/wsgi.py
|
1
|
"""
WSGI config for fantasy_map project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fantasy_map.settings")
application = get_wsgi_application()
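# Editor's note (not part of the original file): with this module in place the
# project can be served by any WSGI server, e.g. (assuming gunicorn is
# installed):
#
#     gunicorn fantasy_map.wsgi:application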
|
pfhayes/boto
|
refs/heads/develop
|
tests/integration/cloudhsm/__init__.py
|
586
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|
weimingtom/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/xml/sax/xmlreader.py
|
53
|
"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
from . import handler
from ._exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
"""Interface for reading an XML document using callbacks.
XMLReader is the interface that an XML parser's SAX2 driver must
implement. This interface allows an application to set and query
features and properties in the parser, to register event handlers
for document processing, and to initiate a document parse.
All SAX interfaces are assumed to be synchronous: the parse
methods must not return until parsing is complete, and readers
must wait for an event-handler callback to return before reporting
the next event."""
def __init__(self):
self._cont_handler = handler.ContentHandler()
self._dtd_handler = handler.DTDHandler()
self._ent_handler = handler.EntityResolver()
self._err_handler = handler.ErrorHandler()
def parse(self, source):
"Parse an XML document from a system identifier or an InputSource."
raise NotImplementedError("This method must be implemented!")
def getContentHandler(self):
"Returns the current ContentHandler."
return self._cont_handler
def setContentHandler(self, handler):
"Registers a new object to receive document content events."
self._cont_handler = handler
def getDTDHandler(self):
"Returns the current DTD handler."
return self._dtd_handler
def setDTDHandler(self, handler):
"Register an object to receive basic DTD-related events."
self._dtd_handler = handler
def getEntityResolver(self):
"Returns the current EntityResolver."
return self._ent_handler
def setEntityResolver(self, resolver):
"Register an object to resolve external entities."
self._ent_handler = resolver
def getErrorHandler(self):
"Returns the current ErrorHandler."
return self._err_handler
def setErrorHandler(self, handler):
"Register an object to receive error-message events."
self._err_handler = handler
def setLocale(self, locale):
"""Allow an application to set the locale for errors and warnings.
SAX parsers are not required to provide localization for errors
and warnings; if they cannot support the requested locale,
however, they must throw a SAX exception. Applications may
request a locale change in the middle of a parse."""
raise SAXNotSupportedException("Locale support not implemented")
def getFeature(self, name):
"Looks up and returns the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
"Sets the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def getProperty(self, name):
"Looks up and returns the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
"Sets the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
"""This interface adds three extra methods to the XMLReader
interface that allow XML parsers to support incremental
parsing. Support for this interface is optional, since not all
underlying XML parsers support this functionality.
When the parser is instantiated it is ready to begin accepting
data from the feed method immediately. After parsing has been
finished with a call to close the reset method must be called to
make the parser ready to accept new data, either from feed or
using the parse method.
Note that these methods must _not_ be called during parsing, that
is, after parse has been called and before it returns.
By default, the class also implements the parse method of the XMLReader
interface using the feed, close and reset methods of the
IncrementalParser interface as a convenience to SAX 2.0 driver
writers."""
def __init__(self, bufsize=2**16):
self._bufsize = bufsize
XMLReader.__init__(self)
def parse(self, source):
from . import saxutils
source = saxutils.prepare_input_source(source)
self.prepareParser(source)
file = source.getByteStream()
buffer = file.read(self._bufsize)
while buffer:
self.feed(buffer)
buffer = file.read(self._bufsize)
self.close()
def feed(self, data):
"""This method gives the raw XML data in the data parameter to
the parser and makes it parse the data, emitting the
corresponding events. It is allowed for XML constructs to be
split across several calls to feed.
feed may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def prepareParser(self, source):
"""This method is called by the parse implementation to allow
the SAX 2.0 driver to prepare itself for parsing."""
raise NotImplementedError("prepareParser must be overridden!")
def close(self):
"""This method is called when the entire XML document has been
passed to the parser through the feed method, to notify the
parser that there are no more data. This allows the parser to
do the final checks on the document and empty the internal
data buffer.
The parser will not be ready to parse another document until
the reset method has been called.
close may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def reset(self):
"""This method is called after close has been called to reset
the parser so that it is ready to parse new documents. The
results of calling parse or feed after close without calling
reset are undefined."""
raise NotImplementedError("This method must be implemented!")
# ===== LOCATOR =====
class Locator:
"""Interface for associating a SAX event with a document
location. A locator object will return valid results only during
calls to DocumentHandler methods; at any other time, the
results are unpredictable."""
def getColumnNumber(self):
"Return the column number where the current event ends."
return -1
def getLineNumber(self):
"Return the line number where the current event ends."
return -1
def getPublicId(self):
"Return the public identifier for the current event."
return None
def getSystemId(self):
"Return the system identifier for the current event."
return None
# ===== INPUTSOURCE =====
class InputSource:
"""Encapsulation of the information needed by the XMLReader to
read entities.
This class may include information about the public identifier,
system identifier, byte stream (possibly with character encoding
information) and/or the character stream of an entity.
Applications will create objects of this class for use in the
XMLReader.parse method and for returning from
EntityResolver.resolveEntity.
An InputSource belongs to the application, the XMLReader is not
allowed to modify InputSource objects passed to it from the
application, although it may make copies and modify those."""
def __init__(self, system_id = None):
self.__system_id = system_id
self.__public_id = None
self.__encoding = None
self.__bytefile = None
self.__charfile = None
def setPublicId(self, public_id):
"Sets the public identifier of this InputSource."
self.__public_id = public_id
def getPublicId(self):
"Returns the public identifier of this InputSource."
return self.__public_id
def setSystemId(self, system_id):
"Sets the system identifier of this InputSource."
self.__system_id = system_id
def getSystemId(self):
"Returns the system identifier of this InputSource."
return self.__system_id
def setEncoding(self, encoding):
"""Sets the character encoding of this InputSource.
The encoding must be a string acceptable for an XML encoding
declaration (see section 4.3.3 of the XML recommendation).
The encoding attribute of the InputSource is ignored if the
InputSource also contains a character stream."""
self.__encoding = encoding
def getEncoding(self):
"Get the character encoding of this InputSource."
return self.__encoding
def setByteStream(self, bytefile):
"""Set the byte stream (a Python file-like object which does
not perform byte-to-character conversion) for this input
source.
The SAX parser will ignore this if there is also a character
stream specified, but it will use a byte stream in preference
to opening a URI connection itself.
If the application knows the character encoding of the byte
stream, it should set it with the setEncoding method."""
self.__bytefile = bytefile
def getByteStream(self):
"""Get the byte stream for this input source.
The getEncoding method will return the character encoding for
this byte stream, or None if unknown."""
return self.__bytefile
def setCharacterStream(self, charfile):
"""Set the character stream for this input source. (The stream
must be a Python 2.0 Unicode-wrapped file-like that performs
conversion to Unicode strings.)
If there is a character stream specified, the SAX parser will
ignore any byte stream and will not attempt to open a URI
connection to the system identifier."""
self.__charfile = charfile
def getCharacterStream(self):
"Get the character stream for this input source."
return self.__charfile
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:
def __init__(self, attrs):
"""Non-NS-aware implementation.
attrs should be of the form {name : value}."""
self._attrs = attrs
def getLength(self):
return len(self._attrs)
def getType(self, name):
return "CDATA"
def getValue(self, name):
return self._attrs[name]
def getValueByQName(self, name):
return self._attrs[name]
def getNameByQName(self, name):
if name not in self._attrs:
raise KeyError(name)
return name
def getQNameByName(self, name):
if name not in self._attrs:
raise KeyError(name)
return name
def getNames(self):
return list(self._attrs.keys())
def getQNames(self):
return list(self._attrs.keys())
def __len__(self):
return len(self._attrs)
def __getitem__(self, name):
return self._attrs[name]
def keys(self):
return list(self._attrs.keys())
def __contains__(self, name):
return name in self._attrs
def get(self, name, alternative=None):
return self._attrs.get(name, alternative)
def copy(self):
return self.__class__(self._attrs)
def items(self):
return list(self._attrs.items())
def values(self):
return list(self._attrs.values())
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):
def __init__(self, attrs, qnames):
"""NS-aware implementation.
attrs should be of the form {(ns_uri, lname): value, ...}.
qnames of the form {(ns_uri, lname): qname, ...}."""
self._attrs = attrs
self._qnames = qnames
def getValueByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return self._attrs[nsname]
raise KeyError(name)
def getNameByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return nsname
raise KeyError(name)
def getQNameByName(self, name):
return self._qnames[name]
def getQNames(self):
return list(self._qnames.values())
def copy(self):
return self.__class__(self._attrs, self._qnames)
def _test():
XMLReader()
IncrementalParser()
Locator()
if __name__ == "__main__":
_test()
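# Editor's illustration (not part of the standard-library module): a small,
# self-contained sketch of how the two attribute implementations above are
# meant to be used by SAX drivers. The names, namespace URI and values are
# made up for the example.
def _attrs_example():
    plain = AttributesImpl({"id": "n1", "class": "big"})
    assert plain.getLength() == 2
    assert plain["id"] == "n1"
    assert plain.get("missing", "fallback") == "fallback"

    ns_name = ("http://example.org/ns", "id")
    ns = AttributesNSImpl({ns_name: "n1"}, {ns_name: "ex:id"})
    assert ns.getValueByQName("ex:id") == "n1"
    assert ns.getQNameByName(ns_name) == "ex:id"
    assert ns.getNames() == [ns_name]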
|
mrfuxi/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/alter_fk/author_app/migrations/0002_alter_id.py
|
379
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('author_app', '0001_initial'),
('book_app', '0001_initial'), # Forces the book table to alter the FK
]
operations = [
migrations.AlterField(
model_name='author',
name='id',
field=models.CharField(max_length=10, primary_key=True),
),
]
|
bowang/tensorflow
|
refs/heads/master
|
tensorflow/python/training/ftrl.py
|
47
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class FtrlOptimizer(optimizer.Optimizer):
"""Optimizer that implements the FTRL algorithm.
See this [paper](
https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf).
This version has support for both online L2 (the L2 penalty given in the paper
above) and shrinkage-type L2 (which is the addition of an L2 penalty to the
loss function).
"""
def __init__(self,
learning_rate,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
use_locking=False,
name="Ftrl",
accum_name=None,
linear_name=None,
l2_shrinkage_regularization_strength=0.0):
r"""Construct a new FTRL optimizer.
Args:
learning_rate: A float value or a constant float `Tensor`.
learning_rate_power: A float value, must be less or equal to zero.
initial_accumulator_value: The starting value for accumulators.
Only positive values are allowed.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Ftrl".
accum_name: The suffix for the variable that keeps the gradient squared
accumulator. If not present, defaults to name.
linear_name: The suffix for the variable that keeps the linear gradient
accumulator. If not present, defaults to name + "_1".
l2_shrinkage_regularization_strength: A float value, must be greater than
or equal to zero. This differs from L2 above in that the L2 above is a
stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
The FTRL formulation can be written as:
w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where
\hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss
function w.r.t. the weights w.
Specifically, in the absence of L1 regularization, it is equivalent to
the following update rule:
w_{t+1} = w_t - lr_t / (1 + 2*L2*lr_t) * g_t -
2*L2_shrinkage*lr_t / (1 + 2*L2*lr_t) * w_t
where lr_t is the learning rate at t.
When input is sparse shrinkage will only happen on the active weights.
Raises:
ValueError: If one of the arguments is invalid.
"""
super(FtrlOptimizer, self).__init__(use_locking, name)
if initial_accumulator_value <= 0.0:
raise ValueError("initial_accumulator_value %f needs to be positive" %
initial_accumulator_value)
if learning_rate_power > 0.0:
raise ValueError("learning_rate_power %f needs to be negative or zero" %
learning_rate_power)
if l1_regularization_strength < 0.0:
raise ValueError(
"l1_regularization_strength %f needs to be positive or zero" %
l1_regularization_strength)
if l2_regularization_strength < 0.0:
raise ValueError(
"l2_regularization_strength %f needs to be positive or zero" %
l2_regularization_strength)
if l2_shrinkage_regularization_strength < 0.0:
raise ValueError(
"l2_shrinkage_regularization_strength %f needs to be positive"
" or zero" % l2_shrinkage_regularization_strength)
self._learning_rate = learning_rate
self._learning_rate_power = learning_rate_power
self._initial_accumulator_value = initial_accumulator_value
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._l2_shrinkage_regularization_strength = (
l2_shrinkage_regularization_strength)
self._learning_rate_tensor = None
self._learning_rate_power_tensor = None
self._l1_regularization_strength_tensor = None
self._l2_regularization_strength_tensor = None
self._l2_shrinkage_regularization_strength_tensor = None
self._accum_name = accum_name
self._linear_name = linear_name
def _create_slots(self, var_list):
# Create the "accum" and "linear" slots.
for v in var_list:
with ops.colocate_with(v):
val = constant_op.constant(
self._initial_accumulator_value, dtype=v.dtype, shape=v.get_shape())
self._get_or_make_slot(v, val, "accum", self._accum_name or self._name)
self._zeros_slot(v, "linear", self._linear_name or self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(
self._learning_rate, name="learning_rate")
self._l1_regularization_strength_tensor = ops.convert_to_tensor(
self._l1_regularization_strength, name="l1_regularization_strength")
self._l2_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_regularization_strength, name="l2_regularization_strength")
self._l2_shrinkage_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_shrinkage_regularization_strength,
name="l2_shrinkage_regularization_strength")
self._learning_rate_power_tensor = ops.convert_to_tensor(
self._learning_rate_power, name="learning_rate_power")
def _apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.apply_ftrl(
var,
accum,
linear,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.apply_ftrl_v2(
var,
accum,
linear,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.resource_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.sparse_apply_ftrl(
var,
accum,
linear,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.sparse_apply_ftrl_v2(
var,
accum,
linear,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
grad.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_sparse_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._l2_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
grad.dtype),
math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
use_locking=self._use_locking)
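# Editor's usage sketch (not part of the original file). FtrlOptimizer follows
# the standard tf.train.Optimizer interface, so under the TF 1.x graph/session
# API it can be used like any other optimizer; the toy quadratic below is an
# illustrative assumption, not something taken from this module.
if __name__ == "__main__":
  import tensorflow as tf

  w = tf.Variable([1.0, -2.0])
  loss = tf.reduce_sum(tf.square(w))
  train_op = FtrlOptimizer(learning_rate=0.1,
                           l1_regularization_strength=0.01).minimize(loss)
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
      sess.run(train_op)
    print(sess.run(w))  # weights driven towards zero by the L1 penalty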
|
victormlourenco/kernel_msm
|
refs/heads/android-msm-mako-3.4-lollipop-mr1
|
tools/perf/python/twatch.py
|
7370
|
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
brianmay/python-tldap
|
refs/heads/master
|
tldap/ldap_passwd.py
|
1
|
# Copyright 2012-2018 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
""" Hash and check passwords. """
from passlib.context import CryptContext
pwd_context = CryptContext(
schemes=[
"ldap_sha512_crypt",
"ldap_salted_sha1",
"ldap_md5",
"ldap_sha1",
"ldap_salted_md5",
"ldap_des_crypt",
"ldap_md5_crypt",
],
default="ldap_sha512_crypt",
)
def check_password(password: str, encrypted: str) -> bool:
""" Check a plaintext password against a hashed password. """
# some old passwords have {crypt} in lower case, and passlib wants it to be
# in upper case.
if encrypted.startswith("{crypt}"):
encrypted = "{CRYPT}" + encrypted[7:]
return pwd_context.verify(password, encrypted)
def encode_password(password: str) -> str:
""" Encode a password. """
return pwd_context.hash(password)
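# Editor's usage sketch (not part of the original module): hash a plaintext
# password with the default scheme and check that it verifies; the example
# password is, of course, made up.
if __name__ == "__main__":
    hashed = encode_password("correct horse battery staple")
    print(hashed)
    assert check_password("correct horse battery staple", hashed)
    assert not check_password("not the password", hashed)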
|
fitermay/intellij-community
|
refs/heads/master
|
python/helpers/py3only/docutils/transforms/peps.py
|
44
|
# $Id: peps.py 6433 2010-09-28 08:21:25Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Transforms for PEP processing.
- `Headers`: Used to transform a PEP's initial RFC-2822 header. It remains a
field list, but some entries get processed.
- `Contents`: Auto-inserts a table of contents.
- `PEPZero`: Special processing for PEP 0.
"""
__docformat__ = 'reStructuredText'
import os
import re
import time
from docutils import DataError
from docutils import nodes, utils, languages
from docutils.transforms import Transform
from docutils.transforms import parts, references, misc
class Headers(Transform):
"""
Process fields in a PEP's initial RFC-2822 header.
"""
default_priority = 360
pep_url = 'pep-%04d'
pep_cvs_url = ('http://svn.python.org/view/*checkout*'
'/peps/trunk/pep-%04d.txt')
rcs_keyword_substitutions = (
(re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
(re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),)
def apply(self):
if not len(self.document):
# @@@ replace these DataErrors with proper system messages
raise DataError('Document tree is empty.')
header = self.document[0]
if not isinstance(header, nodes.field_list) or \
'rfc2822' not in header['classes']:
raise DataError('Document does not begin with an RFC-2822 '
'header; it is not a PEP.')
pep = None
for field in header:
if field[0].astext().lower() == 'pep': # should be the first field
value = field[1].astext()
try:
pep = int(value)
cvs_url = self.pep_cvs_url % pep
except ValueError:
pep = value
cvs_url = None
msg = self.document.reporter.warning(
'"PEP" header must contain an integer; "%s" is an '
'invalid value.' % pep, base_node=field)
msgid = self.document.set_id(msg)
prb = nodes.problematic(value, value or '(none)',
refid=msgid)
prbid = self.document.set_id(prb)
msg.add_backref(prbid)
if len(field[1]):
field[1][0][:] = [prb]
else:
field[1] += nodes.paragraph('', '', prb)
break
if pep is None:
raise DataError('Document does not contain an RFC-2822 "PEP" '
'header.')
if pep == 0:
# Special processing for PEP 0.
pending = nodes.pending(PEPZero)
self.document.insert(1, pending)
self.document.note_pending(pending)
if len(header) < 2 or header[1][0].astext().lower() != 'title':
raise DataError('No title!')
for field in header:
name = field[0].astext().lower()
body = field[1]
if len(body) > 1:
raise DataError('PEP header field body contains multiple '
'elements:\n%s' % field.pformat(level=1))
elif len(body) == 1:
if not isinstance(body[0], nodes.paragraph):
raise DataError('PEP header field body may only contain '
'a single paragraph:\n%s'
% field.pformat(level=1))
elif name == 'last-modified':
date = time.strftime(
'%d-%b-%Y',
time.localtime(os.stat(self.document['source'])[8]))
if cvs_url:
body += nodes.paragraph(
'', '', nodes.reference('', date, refuri=cvs_url))
else:
# empty
continue
para = body[0]
if name == 'author':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node))
elif name == 'discussions-to':
for node in para:
if isinstance(node, nodes.reference):
node.replace_self(mask_email(node, pep))
elif name in ('replaces', 'replaced-by', 'requires'):
newbody = []
space = nodes.Text(' ')
for refpep in re.split(',?\s+', body.astext()):
pepno = int(refpep)
newbody.append(nodes.reference(
refpep, refpep,
refuri=(self.document.settings.pep_base_url
+ self.pep_url % pepno)))
newbody.append(space)
para[:] = newbody[:-1] # drop trailing space
elif name == 'last-modified':
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
if cvs_url:
date = para.astext()
para[:] = [nodes.reference('', date, refuri=cvs_url)]
elif name == 'content-type':
pep_type = para.astext()
uri = self.document.settings.pep_base_url + self.pep_url % 12
para[:] = [nodes.reference('', pep_type, refuri=uri)]
elif name == 'version' and len(body):
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
class Contents(Transform):
"""
Insert an empty table of contents topic and a transform placeholder into
the document after the RFC 2822 header.
"""
default_priority = 380
def apply(self):
language = languages.get_language(self.document.settings.language_code,
self.document.reporter)
name = language.labels['contents']
title = nodes.title('', name)
topic = nodes.topic('', title, classes=['contents'])
name = nodes.fully_normalize_name(name)
if not self.document.has_name(name):
topic['names'].append(name)
self.document.note_implicit_target(topic)
pending = nodes.pending(parts.Contents)
topic += pending
self.document.insert(1, topic)
self.document.note_pending(pending)
class TargetNotes(Transform):
"""
Locate the "References" section, insert a placeholder for an external
target footnote insertion transform at the end, and schedule the
transform to run immediately.
"""
default_priority = 520
def apply(self):
doc = self.document
i = len(doc) - 1
refsect = copyright = None
while i >= 0 and isinstance(doc[i], nodes.section):
title_words = doc[i][0].astext().lower().split()
if 'references' in title_words:
refsect = doc[i]
break
elif 'copyright' in title_words:
copyright = i
i -= 1
if not refsect:
refsect = nodes.section()
refsect += nodes.title('', 'References')
doc.set_id(refsect)
if copyright:
# Put the new "References" section before "Copyright":
doc.insert(copyright, refsect)
else:
# Put the new "References" section at end of doc:
doc.append(refsect)
pending = nodes.pending(references.TargetNotes)
refsect.append(pending)
self.document.note_pending(pending, 0)
pending = nodes.pending(misc.CallBack,
details={'callback': self.cleanup_callback})
refsect.append(pending)
self.document.note_pending(pending, 1)
def cleanup_callback(self, pending):
"""
Remove an empty "References" section.
Called after the `references.TargetNotes` transform is complete.
"""
if len(pending.parent) == 2: # <title> and <pending>
pending.parent.parent.remove(pending.parent)
class PEPZero(Transform):
"""
Special processing for PEP 0.
"""
    default_priority = 760
def apply(self):
visitor = PEPZeroSpecial(self.document)
self.document.walk(visitor)
self.startnode.parent.remove(self.startnode)
class PEPZeroSpecial(nodes.SparseNodeVisitor):
"""
Perform the special processing needed by PEP 0:
- Mask email addresses.
- Link PEP numbers in the second column of 4-column tables to the PEPs
themselves.
"""
pep_url = Headers.pep_url
def unknown_visit(self, node):
pass
def visit_reference(self, node):
node.replace_self(mask_email(node))
def visit_field_list(self, node):
if 'rfc2822' in node['classes']:
raise nodes.SkipNode
def visit_tgroup(self, node):
self.pep_table = node['cols'] == 4
self.entry = 0
def visit_colspec(self, node):
self.entry += 1
if self.pep_table and self.entry == 2:
node['classes'].append('num')
def visit_row(self, node):
self.entry = 0
def visit_entry(self, node):
self.entry += 1
if self.pep_table and self.entry == 2 and len(node) == 1:
node['classes'].append('num')
p = node[0]
if isinstance(p, nodes.paragraph) and len(p) == 1:
text = p.astext()
try:
pep = int(text)
ref = (self.document.settings.pep_base_url
+ self.pep_url % pep)
p[0] = nodes.reference(text, text, refuri=ref)
except ValueError:
pass
non_masked_addresses = ('peps@python.org',
'python-list@python.org',
'python-dev@python.org')
def mask_email(ref, pepno=None):
"""
Mask the email address in `ref` and return a replacement node.
`ref` is returned unchanged if it contains no email address.
For email addresses such as "user@host", mask the address as "user at
host" (text) to thwart simple email address harvesters (except for those
listed in `non_masked_addresses`). If a PEP number (`pepno`) is given,
return a reference including a default email subject.
"""
if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
if ref['refuri'][8:] in non_masked_addresses:
replacement = ref[0]
else:
replacement_text = ref.astext().replace('@', ' at ')
replacement = nodes.raw('', replacement_text, format='html')
if pepno is None:
return replacement
else:
ref['refuri'] += '?subject=PEP%%20%s' % pepno
ref[:] = [replacement]
return ref
else:
return ref
|
ericsnowcurrently/micropython
|
refs/heads/master
|
tests/import/pkg3/subpkg1/__init__.py
|
118
|
print("subpkg1 __name__:", __name__)
|