| code stringlengths 2–1.05M | repo_name stringlengths 5–104 | path stringlengths 4–251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2–1.05M |
|---|---|---|---|---|---|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Yannick Vaucher, Emanuel Cino
#
# The licence is in the file __openerp__.py
#
##############################################################################
import base64
import urllib2
import httplib
import simplejson
from openerp.tests import common
class TestOnramp(common.HttpCase):
""" Base class for all Onramp tests. """
def setUp(self):
super(TestOnramp, self).setUp()
self.server_url = self.env['ir.config_parameter'].get_param(
'web.base.url',
default='http://localhost:8069'
)
api_client_secret = base64.b64encode("client:secret")
self.rest_url = '{0}/onramp?secret={1}'.format(
self.server_url, api_client_secret)
params_post = 'grant_type=client_credentials&scope=read+write'
header_post = {
"Authorization": "Basic " + api_client_secret,
"Content-type": "application/x-www-form-urlencoded",
"Content-Length": 46,
"Expect": "100-continue",
"Connection": "Keep-Alive",
}
conn = httplib.HTTPSConnection('api2.compassion.com')
conn.request("POST", "/core/connect/token", params_post, header_post)
response = conn.getresponse()
data_token = simplejson.loads(response.read())
conn.close()
self.headers = {
'Content-type': 'application/json',
'Authorization': '{token_type} {access_token}'.format(
**data_token),
"x-cim-MessageType": "http://schemas.ci.org/ci/services/"
"communications/2015/09/SBCStructured",
"x-cim-FromAddress": "CHTest",
"x-cim-ToAddress": "CH",
}
def _test_no_token(self):
""" Check we have an access denied if token is not provided
"""
del self.headers['Authorization']
with self.assertRaises(urllib2.HTTPError) as e:
self._send_post({'nothing': 'nothing'})
self.assertEqual(e.exception.code, 401)
self.assertEqual(e.exception.msg, 'UNAUTHORIZED')
def _test_bad_token(self):
""" Check we have an access denied if token is not valid
"""
self.headers['Authorization'] = 'Bearer notarealtoken'
with self.assertRaises(urllib2.HTTPError) as e:
self._send_post({'nothing': 'nothing'})
self.assertEqual(e.exception.code, 401)
self.assertEqual(e.exception.msg, 'UNAUTHORIZED')
def _test_body_no_json(self):
req = urllib2.Request(self.rest_url, "This is not json", self.headers)
with self.assertRaises(urllib2.HTTPError):
urllib2.urlopen(req)
def _send_post(self, vals):
data = simplejson.dumps(vals)
req = urllib2.Request(self.rest_url, data, self.headers)
return urllib2.urlopen(req)
| emgirardin/compassion-modules | message_center_compassion/tests/base_test_onramp.py | Python | agpl-3.0 | 3,074 |
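The `setUp` above fetches an OAuth2 access token with the client-credentials grant over raw `httplib` and then issues authenticated JSON requests through `urllib2`. For comparison, a minimal Python 3 sketch of the same token request using the `requests` library (an alternative client, not used by the module above; the credentials are the placeholder `client`/`secret` pair from the test):

```python
import requests

TOKEN_URL = "https://api2.compassion.com/core/connect/token"  # endpoint used in setUp above

def get_access_token(client_id="client", client_secret="secret"):
    """Fetch an OAuth2 token via the client-credentials grant."""
    resp = requests.post(
        TOKEN_URL,
        data={"grant_type": "client_credentials", "scope": "read write"},
        auth=(client_id, client_secret),  # sends the same Basic Authorization header
    )
    resp.raise_for_status()
    token = resp.json()  # e.g. {"token_type": "Bearer", "access_token": "..."}
    return "{token_type} {access_token}".format(**token)
```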
# -*- coding: utf-8 -*-
#
# Connectors documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 4 11:35:44 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('_themes'))
sys.path.append(os.path.abspath('../../../odoo'))
# Load OpenERP with the correct addons-path so the doc can be built even if
# the addon imports modules from other branches
import openerp
BASE_PATH = os.path.abspath(os.path.join(os.getcwd(), '../../..'))
# You may need to change these to your own paths
ADDONS_PATHS = ('odoo/openerp/addons',
'odoo/addons',
'connector',
'connector-ecommerce',
'e-commerce',
'sale-workflow',
'product-attribute',
'connector-magento')
paths = [os.path.join(BASE_PATH, path) for path in ADDONS_PATHS]
options = ['--addons-path', ','.join(paths)]
openerp.tools.config.parse_config(options)
os.environ['TZ'] = 'UTC'
openerp.service.start_internal()
# -- General configuration --------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode']
todo_include_todos = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenERP Magento Connector'
copyright = u'2013, Camptocamp SA'
# The version info for the project you're documenting, acts as
# replacement for |version| and |release|, also used in various other
# places throughout the built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to
# documentation for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in
# the output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation
# for a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see
# the documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "OpenERP Magento Connector",
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the
# build will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
#
# Note that this is served off CDN, so won't be available offline.
'bootswatch_theme': "united",
}
# Add any paths that contain custom themes here, relative to this
# directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default
# is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is
# True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'openerp-magento-connector-doc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples (source
# start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'openerp-magento-connector.tex',
u'OpenERP Magento Connector Documentation',
u'Camptocamp SA', 'manual'),
]
# The name of an image file (relative to this directory) to place at the
# top of the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are
# parts, not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output -----------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openerp-magento-connector',
u'OpenERP Magento Connector Documentation',
[u'Camptocamp SA'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OpenERP Magento Connector',
u'OpenERP Magento Connector Documentation',
u'Camptocamp SA', 'OpenERP Magento Connector',
'Connector between OpenERP and Magento',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard
# library.
intersphinx_mapping = {
'python': ('http://docs.python.org/2.6', None),
'openerpweb': ('http://doc.openerp.com/trunk/developers/web', None),
'openerpdev': ('http://doc.openerp.com/trunk/developers', None),
'openerpconnector': ('http://www.openerp-connector.com', None),
}
| akretion/connector-magento | magentoerpconnect/doc/conf.py | Python | agpl-3.0 | 10,704 |
import matplotlib.pyplot as plt
import numpy as np
from ad3 import simple_grid, general_graph
def example_binary():
# generate trivial data
x = np.ones((10, 10))
x[:, 5:] = -1
x_noisy = x + np.random.normal(0, 0.8, size=x.shape)
x_thresh = x_noisy > .0
# create unaries
unaries = x_noisy
    # as we convert to int, we need to multiply to get sensible values
unaries = np.dstack([-unaries, unaries])
# create potts pairwise
pairwise = np.eye(2)
# do simple cut
result = np.argmax(simple_grid(unaries, pairwise)[0], axis=-1)
    # use the general graph algorithm
# first, we construct the grid graph
inds = np.arange(x.size).reshape(x.shape)
horz = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]
vert = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]
edges = np.vstack([horz, vert])
# we flatten the unaries
pairwise_per_edge = np.repeat(pairwise[np.newaxis, :, :], edges.shape[0],
axis=0)
result_graph = np.argmax(general_graph(unaries.reshape(-1, 2), edges,
pairwise_per_edge)[0], axis=-1)
# plot results
plt.subplot(231, title="original")
plt.imshow(x, interpolation='nearest')
plt.subplot(232, title="noisy version")
plt.imshow(x_noisy, interpolation='nearest')
plt.subplot(234, title="thresholding result")
plt.imshow(x_thresh, interpolation='nearest')
plt.subplot(235, title="cut_simple")
plt.imshow(result, interpolation='nearest')
plt.subplot(236, title="cut_from_graph")
plt.imshow(result_graph.reshape(x.shape), interpolation='nearest')
plt.show()
def example_multinomial():
# generate dataset with three stripes
np.random.seed(4)
x = np.zeros((10, 12, 3))
x[:, :4, 0] = 1
x[:, 4:8, 1] = 1
x[:, 8:, 2] = 1
unaries = x + 1.5 * np.random.normal(size=x.shape)
x = np.argmax(x, axis=2)
unaries = unaries
x_thresh = np.argmax(unaries, axis=2)
# potts potential
pairwise_potts = 2 * np.eye(3)
result = np.argmax(simple_grid(unaries, pairwise_potts)[0], axis=-1)
# potential that penalizes 0-1 and 1-2 less than 0-2
pairwise_1d = 2 * np.eye(3) + 2
pairwise_1d[-1, 0] = 0
pairwise_1d[0, -1] = 0
print(pairwise_1d)
result_1d = np.argmax(simple_grid(unaries, pairwise_1d)[0], axis=-1)
plt.subplot(141, title="original")
plt.imshow(x, interpolation="nearest")
plt.subplot(142, title="thresholded unaries")
plt.imshow(x_thresh, interpolation="nearest")
plt.subplot(143, title="potts potentials")
plt.imshow(result, interpolation="nearest")
plt.subplot(144, title="1d topology potentials")
plt.imshow(result_1d, interpolation="nearest")
plt.show()
#example_binary()
example_multinomial()
| PhdDone/AD3 | python/example.py | Python | lgpl-3.0 | 2,817 |
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
__license__ = 'Public Domain'
import codecs
import io
import os
import random
import sys
from .options import (
parseOpts,
)
from .compat import (
compat_expanduser,
compat_getpass,
compat_shlex_split,
workaround_optparse_bug9161,
)
from .utils import (
DateRange,
decodeOption,
DEFAULT_OUTTMPL,
DownloadError,
match_filter_func,
MaxDownloadsReached,
preferredencoding,
read_batch_urls,
SameFileError,
setproctitle,
std_headers,
write_string,
render_table,
)
from .update import update_self
from .downloader import (
FileDownloader,
)
from .extractor import gen_extractors, list_extractors
from .extractor.adobepass import MSO_INFO
from .YoutubeDL import YoutubeDL
def _real_main(argv=None):
# Compatibility fixes for Windows
if sys.platform == 'win32':
# https://github.com/rg3/youtube-dl/issues/820
codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
workaround_optparse_bug9161()
setproctitle('youtube-dl')
parser, opts, args = parseOpts(argv)
# Set user agent
if opts.user_agent is not None:
std_headers['User-Agent'] = opts.user_agent
# Set referer
if opts.referer is not None:
std_headers['Referer'] = opts.referer
# Custom HTTP headers
if opts.headers is not None:
for h in opts.headers:
if ':' not in h:
parser.error('wrong header formatting, it should be key:value, not "%s"' % h)
key, value = h.split(':', 1)
if opts.verbose:
write_string('[debug] Adding header from command line option %s:%s\n' % (key, value))
std_headers[key] = value
# Dump user agent
if opts.dump_user_agent:
write_string(std_headers['User-Agent'] + '\n', out=sys.stdout)
sys.exit(0)
# Batch file verification
batch_urls = []
if opts.batchfile is not None:
try:
if opts.batchfile == '-':
batchfd = sys.stdin
else:
batchfd = io.open(
compat_expanduser(opts.batchfile),
'r', encoding='utf-8', errors='ignore')
batch_urls = read_batch_urls(batchfd)
if opts.verbose:
write_string('[debug] Batch file urls: ' + repr(batch_urls) + '\n')
except IOError:
sys.exit('ERROR: batch file could not be read')
all_urls = batch_urls + args
all_urls = [url.strip() for url in all_urls]
_enc = preferredencoding()
all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]
if opts.list_extractors:
for ie in list_extractors(opts.age_limit):
write_string(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else '') + '\n', out=sys.stdout)
matchedUrls = [url for url in all_urls if ie.suitable(url)]
for mu in matchedUrls:
write_string(' ' + mu + '\n', out=sys.stdout)
sys.exit(0)
if opts.list_extractor_descriptions:
for ie in list_extractors(opts.age_limit):
if not ie._WORKING:
continue
desc = getattr(ie, 'IE_DESC', ie.IE_NAME)
if desc is False:
continue
if hasattr(ie, 'SEARCH_KEY'):
_SEARCHES = ('cute kittens', 'slithering pythons', 'falling cat', 'angry poodle', 'purple fish', 'running tortoise', 'sleeping bunny', 'burping cow')
_COUNTS = ('', '5', '10', 'all')
desc += ' (Example: "%s%s:%s" )' % (ie.SEARCH_KEY, random.choice(_COUNTS), random.choice(_SEARCHES))
write_string(desc + '\n', out=sys.stdout)
sys.exit(0)
if opts.ap_list_mso:
table = [[mso_id, mso_info['name']] for mso_id, mso_info in MSO_INFO.items()]
write_string('Supported TV Providers:\n' + render_table(['mso', 'mso name'], table) + '\n', out=sys.stdout)
sys.exit(0)
# Conflicting, missing and erroneous options
if opts.usenetrc and (opts.username is not None or opts.password is not None):
parser.error('using .netrc conflicts with giving username/password')
if opts.password is not None and opts.username is None:
parser.error('account username missing\n')
if opts.ap_password is not None and opts.ap_username is None:
parser.error('TV Provider account username missing\n')
if opts.outtmpl is not None and (opts.usetitle or opts.autonumber or opts.useid):
parser.error('using output template conflicts with using title, video ID or auto number')
if opts.usetitle and opts.useid:
parser.error('using title conflicts with using video ID')
if opts.username is not None and opts.password is None:
opts.password = compat_getpass('Type account password and press [Return]: ')
if opts.ap_username is not None and opts.ap_password is None:
opts.ap_password = compat_getpass('Type TV provider account password and press [Return]: ')
if opts.ratelimit is not None:
numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
if numeric_limit is None:
parser.error('invalid rate limit specified')
opts.ratelimit = numeric_limit
if opts.min_filesize is not None:
numeric_limit = FileDownloader.parse_bytes(opts.min_filesize)
if numeric_limit is None:
parser.error('invalid min_filesize specified')
opts.min_filesize = numeric_limit
if opts.max_filesize is not None:
numeric_limit = FileDownloader.parse_bytes(opts.max_filesize)
if numeric_limit is None:
parser.error('invalid max_filesize specified')
opts.max_filesize = numeric_limit
if opts.sleep_interval is not None:
if opts.sleep_interval < 0:
parser.error('sleep interval must be positive or 0')
if opts.max_sleep_interval is not None:
if opts.max_sleep_interval < 0:
parser.error('max sleep interval must be positive or 0')
if opts.max_sleep_interval < opts.sleep_interval:
parser.error('max sleep interval must be greater than or equal to min sleep interval')
else:
opts.max_sleep_interval = opts.sleep_interval
if opts.ap_mso and opts.ap_mso not in MSO_INFO:
parser.error('Unsupported TV Provider, use --ap-list-mso to get a list of supported TV Providers')
def parse_retries(retries):
if retries in ('inf', 'infinite'):
parsed_retries = float('inf')
else:
try:
parsed_retries = int(retries)
except (TypeError, ValueError):
parser.error('invalid retry count specified')
return parsed_retries
if opts.retries is not None:
opts.retries = parse_retries(opts.retries)
if opts.fragment_retries is not None:
opts.fragment_retries = parse_retries(opts.fragment_retries)
if opts.buffersize is not None:
numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
if numeric_buffersize is None:
parser.error('invalid buffer size specified')
opts.buffersize = numeric_buffersize
if opts.playliststart <= 0:
raise ValueError('Playlist start must be positive')
if opts.playlistend not in (-1, None) and opts.playlistend < opts.playliststart:
raise ValueError('Playlist end must be greater than playlist start')
if opts.extractaudio:
if opts.audioformat not in ['best', 'aac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav']:
parser.error('invalid audio format specified')
if opts.audioquality:
opts.audioquality = opts.audioquality.strip('k').strip('K')
if not opts.audioquality.isdigit():
parser.error('invalid audio quality specified')
if opts.recodevideo is not None:
if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv', 'avi']:
parser.error('invalid video recode format specified')
if opts.convertsubtitles is not None:
if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
parser.error('invalid subtitle format specified')
if opts.date is not None:
date = DateRange.day(opts.date)
else:
date = DateRange(opts.dateafter, opts.datebefore)
# Do not download videos when there are audio-only formats
if opts.extractaudio and not opts.keepvideo and opts.format is None:
opts.format = 'bestaudio/best'
# --all-sub automatically sets --write-sub if --write-auto-sub is not given
# this was the old behaviour if only --all-sub was given.
if opts.allsubtitles and not opts.writeautomaticsub:
opts.writesubtitles = True
outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
(opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
(opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
(opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
(opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
(opts.useid and '%(id)s.%(ext)s') or
(opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
DEFAULT_OUTTMPL)
if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
parser.error('Cannot download a video and extract audio into the same'
' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
' template'.format(outtmpl))
any_getting = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
any_printing = opts.print_json
download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
# PostProcessors
postprocessors = []
# Add the metadata pp first, the other pps will copy it
if opts.metafromtitle:
postprocessors.append({
'key': 'MetadataFromTitle',
'titleformat': opts.metafromtitle
})
if opts.addmetadata:
postprocessors.append({'key': 'FFmpegMetadata'})
if opts.extractaudio:
postprocessors.append({
'key': 'FFmpegExtractAudio',
'preferredcodec': opts.audioformat,
'preferredquality': opts.audioquality,
'nopostoverwrites': opts.nopostoverwrites,
})
if opts.recodevideo:
postprocessors.append({
'key': 'FFmpegVideoConvertor',
'preferedformat': opts.recodevideo,
})
if opts.convertsubtitles:
postprocessors.append({
'key': 'FFmpegSubtitlesConvertor',
'format': opts.convertsubtitles,
})
if opts.embedsubtitles:
postprocessors.append({
'key': 'FFmpegEmbedSubtitle',
})
if opts.embedthumbnail:
already_have_thumbnail = opts.writethumbnail or opts.write_all_thumbnails
postprocessors.append({
'key': 'EmbedThumbnail',
'already_have_thumbnail': already_have_thumbnail
})
if not already_have_thumbnail:
opts.writethumbnail = True
# XAttrMetadataPP should be run after post-processors that may change file
# contents
if opts.xattrs:
postprocessors.append({'key': 'XAttrMetadata'})
# Please keep ExecAfterDownload towards the bottom as it allows the user to modify the final file in any way.
# So if the user is able to remove the file before your postprocessor runs it might cause a few problems.
if opts.exec_cmd:
postprocessors.append({
'key': 'ExecAfterDownload',
'exec_cmd': opts.exec_cmd,
})
external_downloader_args = None
if opts.external_downloader_args:
external_downloader_args = compat_shlex_split(opts.external_downloader_args)
postprocessor_args = None
if opts.postprocessor_args:
postprocessor_args = compat_shlex_split(opts.postprocessor_args)
match_filter = (
None if opts.match_filter is None
else match_filter_func(opts.match_filter))
ydl_opts = {
'usenetrc': opts.usenetrc,
'username': opts.username,
'password': opts.password,
'twofactor': opts.twofactor,
'videopassword': opts.videopassword,
'ap_mso': opts.ap_mso,
'ap_username': opts.ap_username,
'ap_password': opts.ap_password,
'quiet': (opts.quiet or any_getting or any_printing),
'no_warnings': opts.no_warnings,
'forceurl': opts.geturl,
'forcetitle': opts.gettitle,
'forceid': opts.getid,
'forcethumbnail': opts.getthumbnail,
'forcedescription': opts.getdescription,
'forceduration': opts.getduration,
'forcefilename': opts.getfilename,
'forceformat': opts.getformat,
'forcejson': opts.dumpjson or opts.print_json,
'dump_single_json': opts.dump_single_json,
'simulate': opts.simulate or any_getting,
'skip_download': opts.skip_download,
'format': opts.format,
'listformats': opts.listformats,
'outtmpl': outtmpl,
'autonumber_size': opts.autonumber_size,
'restrictfilenames': opts.restrictfilenames,
'ignoreerrors': opts.ignoreerrors,
'force_generic_extractor': opts.force_generic_extractor,
'ratelimit': opts.ratelimit,
'nooverwrites': opts.nooverwrites,
'retries': opts.retries,
'fragment_retries': opts.fragment_retries,
'skip_unavailable_fragments': opts.skip_unavailable_fragments,
'buffersize': opts.buffersize,
'noresizebuffer': opts.noresizebuffer,
'continuedl': opts.continue_dl,
'noprogress': opts.noprogress,
'progress_with_newline': opts.progress_with_newline,
'playliststart': opts.playliststart,
'playlistend': opts.playlistend,
'playlistreverse': opts.playlist_reverse,
'noplaylist': opts.noplaylist,
'logtostderr': opts.outtmpl == '-',
'consoletitle': opts.consoletitle,
'nopart': opts.nopart,
'updatetime': opts.updatetime,
'writedescription': opts.writedescription,
'writeannotations': opts.writeannotations,
'writeinfojson': opts.writeinfojson,
'writethumbnail': opts.writethumbnail,
'write_all_thumbnails': opts.write_all_thumbnails,
'writesubtitles': opts.writesubtitles,
'writeautomaticsub': opts.writeautomaticsub,
'allsubtitles': opts.allsubtitles,
'listsubtitles': opts.listsubtitles,
'subtitlesformat': opts.subtitlesformat,
'subtitleslangs': opts.subtitleslangs,
'matchtitle': decodeOption(opts.matchtitle),
'rejecttitle': decodeOption(opts.rejecttitle),
'max_downloads': opts.max_downloads,
'prefer_free_formats': opts.prefer_free_formats,
'verbose': opts.verbose,
'dump_intermediate_pages': opts.dump_intermediate_pages,
'write_pages': opts.write_pages,
'test': opts.test,
'keepvideo': opts.keepvideo,
'min_filesize': opts.min_filesize,
'max_filesize': opts.max_filesize,
'min_views': opts.min_views,
'max_views': opts.max_views,
'daterange': date,
'cachedir': opts.cachedir,
'youtube_print_sig_code': opts.youtube_print_sig_code,
'age_limit': opts.age_limit,
'download_archive': download_archive_fn,
'cookiefile': opts.cookiefile,
'nocheckcertificate': opts.no_check_certificate,
'prefer_insecure': opts.prefer_insecure,
'proxy': opts.proxy,
'socket_timeout': opts.socket_timeout,
'bidi_workaround': opts.bidi_workaround,
'debug_printtraffic': opts.debug_printtraffic,
'prefer_ffmpeg': opts.prefer_ffmpeg,
'include_ads': opts.include_ads,
'default_search': opts.default_search,
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
'encoding': opts.encoding,
'extract_flat': opts.extract_flat,
'mark_watched': opts.mark_watched,
'merge_output_format': opts.merge_output_format,
'postprocessors': postprocessors,
'fixup': opts.fixup,
'source_address': opts.source_address,
'call_home': opts.call_home,
'sleep_interval': opts.sleep_interval,
'max_sleep_interval': opts.max_sleep_interval,
'external_downloader': opts.external_downloader,
'list_thumbnails': opts.list_thumbnails,
'playlist_items': opts.playlist_items,
'xattr_set_filesize': opts.xattr_set_filesize,
'match_filter': match_filter,
'no_color': opts.no_color,
'ffmpeg_location': opts.ffmpeg_location,
'hls_prefer_native': opts.hls_prefer_native,
'hls_use_mpegts': opts.hls_use_mpegts,
'external_downloader_args': external_downloader_args,
'postprocessor_args': postprocessor_args,
'cn_verification_proxy': opts.cn_verification_proxy,
'geo_verification_proxy': opts.geo_verification_proxy,
}
with YoutubeDL(ydl_opts) as ydl:
# Update version
if opts.update_self:
update_self(ydl.to_screen, opts.verbose, ydl._opener)
# Remove cache dir
if opts.rm_cachedir:
ydl.cache.remove()
# Maybe do nothing
if (len(all_urls) < 1) and (opts.load_info_filename is None):
if opts.update_self or opts.rm_cachedir:
sys.exit()
ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
parser.error(
'You must provide at least one URL.\n'
'Type youtube-dl --help to see a list of all options.')
try:
if opts.load_info_filename is not None:
retcode = ydl.download_with_info_file(compat_expanduser(opts.load_info_filename))
else:
retcode = ydl.download(all_urls)
except MaxDownloadsReached:
ydl.to_screen('--max-download limit reached, aborting.')
retcode = 101
sys.exit(retcode)
def main(argv=None):
try:
_real_main(argv)
except DownloadError:
sys.exit(1)
except SameFileError:
sys.exit('ERROR: fixed output name but more than one file to download')
except KeyboardInterrupt:
sys.exit('\nERROR: Interrupted by user')
__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']
| jbuchbinder/youtube-dl | youtube_dl/__init__.py | Python | unlicense | 18,757 |
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets a team by its ID.
To determine which teams exist, run get_all_teams.py.
Tags: TeamService.getTeam
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If the module was installed via the "setup.py"
# script, then the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
team_service = client.GetService('TeamService', version='v201306')
# Set the ID of the team to get.
team_id = 'INSERT_TEAM_ID_HERE'
# Get team.
team = team_service.GetTeam(team_id)[0]
# Display results.
print ('Team with ID \'%s\' and name \'%s\' was found.'
% (team['id'], team['name']))
| caioserra/apiAdwords | examples/adspygoogle/dfp/v201306/get_team.py | Python | apache-2.0 | 1,511 |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import eventlet
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as constants
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common.notifier import api as notifier_api
from neutron.openstack.common import periodic_task
from neutron.openstack.common.rpc import proxy
from neutron.openstack.common import service
from neutron import service as neutron_service
LOG = logging.getLogger(__name__)
class MeteringPluginRpc(proxy.RpcProxy):
BASE_RPC_API_VERSION = '1.0'
def __init__(self, host):
super(MeteringPluginRpc,
self).__init__(topic=topics.METERING_AGENT,
default_version=self.BASE_RPC_API_VERSION)
def _get_sync_data_metering(self, context):
try:
return self.call(context,
self.make_msg('get_sync_data_metering',
host=self.host),
topic=topics.METERING_PLUGIN)
except Exception:
LOG.exception(_("Failed synchronizing routers"))
class MeteringAgent(MeteringPluginRpc, manager.Manager):
Opts = [
cfg.StrOpt('driver',
default='neutron.services.metering.drivers.noop.'
'noop_driver.NoopMeteringDriver',
help=_("Metering driver")),
cfg.IntOpt('measure_interval', default=30,
help=_("Interval between two metering measures")),
cfg.IntOpt('report_interval', default=300,
help=_("Interval between two metering reports")),
]
def __init__(self, host, conf=None):
self.conf = conf or cfg.CONF
self._load_drivers()
self.root_helper = config.get_root_helper(self.conf)
self.context = context.get_admin_context_without_session()
self.metering_info = {}
self.metering_loop = loopingcall.FixedIntervalLoopingCall(
self._metering_loop
)
measure_interval = self.conf.measure_interval
self.last_report = 0
self.metering_loop.start(interval=measure_interval)
self.host = host
self.label_tenant_id = {}
self.routers = {}
self.metering_infos = {}
super(MeteringAgent, self).__init__(host=self.conf.host)
def _load_drivers(self):
"""Loads plugin-driver from configuration."""
LOG.info(_("Loading Metering driver %s"), self.conf.driver)
if not self.conf.driver:
raise SystemExit(_('A metering driver must be specified'))
self.metering_driver = importutils.import_object(
self.conf.driver, self, self.conf)
def _metering_notification(self):
for label_id, info in self.metering_infos.items():
data = {'label_id': label_id,
'tenant_id': self.label_tenant_id.get(label_id),
'pkts': info['pkts'],
'bytes': info['bytes'],
'time': info['time'],
'first_update': info['first_update'],
'last_update': info['last_update'],
'host': self.host}
LOG.debug(_("Send metering report: %s"), data)
notifier_api.notify(self.context,
notifier_api.publisher_id('metering'),
'l3.meter',
notifier_api.CONF.default_notification_level,
data)
info['pkts'] = 0
info['bytes'] = 0
info['time'] = 0
    def _purge_metering_info(self):
        ts = int(time.time())
        report_interval = self.conf.report_interval
        # drop labels that have not been updated for longer than the report interval
        for label_id, info in list(self.metering_infos.items()):
            if ts - info['last_update'] > report_interval:
                del self.metering_infos[label_id]
def _add_metering_info(self, label_id, pkts, bytes):
ts = int(time.time())
info = self.metering_infos.get(label_id, {'bytes': 0,
'pkts': 0,
'time': 0,
'first_update': ts,
'last_update': ts})
info['bytes'] += bytes
info['pkts'] += pkts
info['time'] += ts - info['last_update']
info['last_update'] = ts
self.metering_infos[label_id] = info
return info
def _add_metering_infos(self):
self.label_tenant_id = {}
for router in self.routers.values():
tenant_id = router['tenant_id']
labels = router.get(constants.METERING_LABEL_KEY, [])
for label in labels:
label_id = label['id']
self.label_tenant_id[label_id] = tenant_id
tenant_id = self.label_tenant_id.get
accs = self._get_traffic_counters(self.context, self.routers.values())
if not accs:
return
for label_id, acc in accs.items():
self._add_metering_info(label_id, acc['pkts'], acc['bytes'])
def _metering_loop(self):
self._add_metering_infos()
ts = int(time.time())
delta = ts - self.last_report
report_interval = self.conf.report_interval
if delta > report_interval:
self._metering_notification()
self._purge_metering_info()
self.last_report = ts
@utils.synchronized('metering-agent')
def _invoke_driver(self, context, meterings, func_name):
try:
return getattr(self.metering_driver, func_name)(context, meterings)
except RuntimeError:
LOG.exception(_("Driver %(driver)s does not implement %(func)s"),
{'driver': cfg.CONF.metering_driver,
'func': func_name})
@periodic_task.periodic_task(run_immediately=True)
def _sync_routers_task(self, context):
routers = self._get_sync_data_metering(self.context)
if not routers:
return
self._update_routers(context, routers)
def router_deleted(self, context, router_id):
self._add_metering_infos()
if router_id in self.routers:
del self.routers[router_id]
return self._invoke_driver(context, router_id,
'remove_router')
def routers_updated(self, context, routers=None):
if not routers:
routers = self._get_sync_data_metering(self.context)
if not routers:
return
self._update_routers(context, routers)
def _update_routers(self, context, routers):
for router in routers:
self.routers[router['id']] = router
return self._invoke_driver(context, routers,
'update_routers')
def _get_traffic_counters(self, context, routers):
LOG.debug(_("Get router traffic counters"))
return self._invoke_driver(context, routers, 'get_traffic_counters')
def update_metering_label_rules(self, context, routers):
LOG.debug(_("Update metering rules from agent"))
return self._invoke_driver(context, routers,
'update_metering_label_rules')
def add_metering_label(self, context, routers):
LOG.debug(_("Creating a metering label from agent"))
return self._invoke_driver(context, routers,
'add_metering_label')
def remove_metering_label(self, context, routers):
self._add_metering_infos()
LOG.debug(_("Delete a metering label from agent"))
return self._invoke_driver(context, routers,
'remove_metering_label')
class MeteringAgentWithStateReport(MeteringAgent):
def __init__(self, host, conf=None):
super(MeteringAgentWithStateReport, self).__init__(host=host,
conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-metering-agent',
'host': host,
'topic': topics.METERING_AGENT,
'configurations': {
'metering_driver': self.conf.driver,
'measure_interval':
self.conf.measure_interval,
'report_interval': self.conf.report_interval
},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_METERING}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.state_rpc.report_state(self.context, self.agent_state,
self.use_call)
self.agent_state.pop('start_flag', None)
self.use_call = False
except AttributeError:
# This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
return
except Exception:
LOG.exception(_("Failed reporting state!"))
def agent_updated(self, context, payload):
LOG.info(_("agent_updated by server side %s!"), payload)
def main():
eventlet.monkey_patch()
conf = cfg.CONF
conf.register_opts(MeteringAgent.Opts)
config.register_agent_state_opts_helper(conf)
config.register_root_helper(conf)
conf(project='neutron')
config.setup_logging(conf)
server = neutron_service.Service.create(
binary='neutron-metering-agent',
topic=topics.METERING_AGENT,
report_interval=cfg.CONF.AGENT.report_interval,
manager='neutron.services.metering.agents.'
'metering_agent.MeteringAgentWithStateReport')
service.launch(server).wait()
| netscaler/neutron | neutron/services/metering/agents/metering_agent.py | Python | apache-2.0 | 11,158 |
# -*- coding: utf-8 -*-
import sys
import os
# Import the common config file
# Note that paths in the common config are interpreted as if they were
# in the location of this file
sys.path.insert(0, os.path.abspath('../../_common'))
from common_conf import *
# Override the common config
html_short_title_toc = manuals_dict["faqs"]
html_short_title = u'CDAP %s' % html_short_title_toc
html_context = {"html_short_title_toc":html_short_title_toc}
# Remove this guide from the mapping as the lookup would fail, since it has been deleted by clean
intersphinx_mapping.pop("faqs", None)
html_theme = 'cdap-faqs'
| chtyim/cdap | cdap-docs/faqs/source/conf.py | Python | apache-2.0 | 603 |
"""
relu where each channel has a different leak rate
"""
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("interval_relu")
class IntervalReLUNode(treeano.NodeImpl):
hyperparameter_names = ("leak_min",
"leak_max")
def compute_output(self, network, in_vw):
leak_min = network.find_hyperparameter(["leak_min"], 0)
leak_max = network.find_hyperparameter(["leak_max"], 1)
num_channels = in_vw.shape[1]
alpha = np.linspace(leak_min, leak_max, num_channels).astype(fX)
pattern = ["x" if i != 1 else 0 for i in range(in_vw.ndim)]
alpha_var = T.constant(alpha).dimshuffle(*pattern)
out_var = treeano.utils.rectify(in_vw.variable,
negative_coefficient=alpha_var)
network.create_vw(
"default",
variable=out_var,
shape=in_vw.shape,
tags={"output"},
)
| diogo149/treeano | treeano/sandbox/nodes/interval_relu.py | Python | apache-2.0 | 1,043 |
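`IntervalReLUNode` above builds `alpha = np.linspace(leak_min, leak_max, num_channels)` and rectifies with a per-channel negative slope. A minimal NumPy sketch of that per-channel leaky ReLU, independent of Theano/treeano (the `interval_relu` helper is illustrative, not part of the module):

```python
import numpy as np

def interval_relu(x, leak_min=0.0, leak_max=1.0):
    """Leaky ReLU where each channel (axis 1) gets its own leak coefficient."""
    num_channels = x.shape[1]
    alpha = np.linspace(leak_min, leak_max, num_channels)
    # broadcast alpha over (batch, channel, spatial...), like the dimshuffle pattern above
    shape = [1] * x.ndim
    shape[1] = num_channels
    alpha = alpha.reshape(shape)
    return np.where(x > 0, x, alpha * x)

# example: batch of 2, 3 channels, 4x4 feature maps
y = interval_relu(np.random.randn(2, 3, 4, 4))
```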
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
"""Parser for IE index.dat files.
Note that this is a very naive and incomplete implementation and should be
replaced with a more intelligent one. Do not implement anything based on this
code, it is a placeholder for something real.
For anyone who wants a useful reference, see this:
http://heanet.dl.sourceforge.net/project/libmsiecf/Documentation/MSIE%20Cache%20
File%20format/MSIE%20Cache%20File%20%28index.dat%29%20format.pdf
"""
import datetime
import glob
import operator
import os
import struct
import sys
import urlparse
import logging
from grr.lib import parsers
from grr.lib import rdfvalue
# Difference between 1 Jan 1601 and 1 Jan 1970, in microseconds.
WIN_UNIX_DIFF_MSECS = 11644473600 * 1e6
class IEHistoryParser(parsers.FileParser):
"""Parse IE index.dat files into BrowserHistoryItem objects."""
output_types = ["BrowserHistoryItem"]
supported_artifacts = ["InternetExplorerHistory"]
def Parse(self, stat, file_object, knowledge_base):
"""Parse the History file."""
_, _ = stat, knowledge_base
# TODO(user): Convert this to use the far more intelligent plaso parser.
ie = IEParser(file_object)
for dat in ie.Parse():
yield rdfvalue.BrowserHistoryItem(
url=dat["url"], domain=urlparse.urlparse(dat["url"]).netloc,
access_time=dat.get("mtime"),
program_name="Internet Explorer", source_urn=stat.aff4path)
class IEParser(object):
"""Parser object for index.dat files.
The file format for IE index.dat files is somewhat poorly documented.
The following implementation is based on information from:
http://www.forensicswiki.org/wiki/Internet_Explorer_History_File_Format
Returns results in chronological order based on mtime
"""
FILE_HEADER = "Client UrlCache MMF Ver 5.2"
BLOCK_SIZE = 0x80
def __init__(self, input_obj):
"""Initialize.
Args:
input_obj: A file like object to read the index.dat from.
"""
self._file = input_obj
self._entries = []
def Parse(self):
"""Parse the file."""
if not self._file:
logging.error("Couldn't open file")
return
# Limit read size to 5MB.
self.input_dat = self._file.read(1024 * 1024 * 5)
if not self.input_dat.startswith(self.FILE_HEADER):
logging.error("Invalid index.dat file %s", self._file)
return
# Events aren't time ordered in the history file, so we collect them all
# then sort.
events = []
for event in self._DoParse():
events.append(event)
for event in sorted(events, key=operator.itemgetter("mtime")):
yield event
def _GetRecord(self, offset, record_size):
"""Retrieve a single record from the file.
Args:
offset: offset from start of input_dat where header starts
record_size: length of the header according to file (untrusted)
Returns:
A dict containing a single browser history record.
"""
record_header = "<4sLQQL"
get4 = lambda x: struct.unpack("<L", self.input_dat[x:x+4])[0]
url_offset = struct.unpack("B", self.input_dat[offset+52:offset+53])[0]
if url_offset in [0xFF, 0xFE]:
return None
data_offset = get4(offset + 68)
data_size = get4(offset + 72)
start_pos = offset + data_offset
data = struct.unpack("{0}s".format(data_size),
self.input_dat[start_pos:start_pos + data_size])[0]
fmt = record_header
unknown_size = url_offset - struct.calcsize(fmt)
fmt += "{0}s".format(unknown_size)
fmt += "{0}s".format(record_size - struct.calcsize(fmt))
dat = struct.unpack(fmt, self.input_dat[offset:offset+record_size])
header, blocks, mtime, ctime, ftime, _, url = dat
url = url.split(chr(0x00))[0]
if mtime: mtime = mtime/10 - WIN_UNIX_DIFF_MSECS
if ctime: ctime = ctime/10 - WIN_UNIX_DIFF_MSECS
return {"header": header, # the header
"blocks": blocks, # number of blocks
"urloffset": url_offset, # offset of URL in file
"data_offset": data_offset, # offset for start of data
"data_size": data_size, # size of data
"data": data, # actual data
"mtime": mtime, # modified time
"ctime": ctime, # created time
"ftime": ftime, # file time
"url": url # the url visited
}
def _DoParse(self):
"""Parse a file for history records yielding dicts.
Yields:
Dicts containing browser history
"""
get4 = lambda x: struct.unpack("<L", self.input_dat[x:x+4])[0]
filesize = get4(0x1c)
offset = get4(0x20)
coffset = offset
while coffset < filesize:
etype = struct.unpack("4s", self.input_dat[coffset:coffset + 4])[0]
if etype == "REDR":
pass
elif etype in ["URL "]:
# Found a valid record
reclen = get4(coffset + 4) * self.BLOCK_SIZE
yield self._GetRecord(coffset, reclen)
coffset += self.BLOCK_SIZE
def main(argv):
if len(argv) < 2:
print "Usage: {0} index.dat".format(os.path.basename(argv[0]))
else:
files_to_process = []
for input_glob in argv[1:]:
files_to_process += glob.glob(input_glob)
for input_file in files_to_process:
ie = IEParser(open(input_file))
for dat in ie.Parse():
dat["ctime"] = datetime.datetime.utcfromtimestamp(dat["ctime"]/1e6)
print "{ctime} {header} {url}".format(**dat)
if __name__ == "__main__":
main(sys.argv)
| ForensicTools/GRREAT-475_2141-Chaigon-Failey-Siebert | parsers/ie_history.py | Python | apache-2.0 | 5,573 |
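The `_GetRecord` method above converts Windows FILETIME values (100-nanosecond ticks since 1601-01-01) into Unix-epoch microseconds via `mtime/10 - WIN_UNIX_DIFF_MSECS`. A minimal sketch of that conversion in isolation (the `filetime_to_datetime` helper is mine, not part of the parser):

```python
import datetime

# microseconds between 1601-01-01 and 1970-01-01, same constant as in the parser
WIN_UNIX_DIFF_MSECS = 11644473600 * 1e6

def filetime_to_datetime(filetime):
    """Convert a Windows FILETIME (100-ns ticks since 1601) to a UTC datetime."""
    unix_usecs = filetime / 10 - WIN_UNIX_DIFF_MSECS  # ticks -> microseconds, then shift epoch
    return datetime.datetime.utcfromtimestamp(unix_usecs / 1e6)

# e.g. 2015-01-01 00:00:00 UTC expressed as a FILETIME
print(filetime_to_datetime((1420070400 + 11644473600) * 10**7))
```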
"""Support Google Home units."""
import logging
import asyncio
import voluptuous as vol
from homeassistant.const import CONF_DEVICES, CONF_HOST
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['googledevices==1.0.2']
DOMAIN = 'googlehome'
CLIENT = 'googlehome_client'
NAME = 'GoogleHome'
CONF_DEVICE_TYPES = 'device_types'
CONF_RSSI_THRESHOLD = 'rssi_threshold'
CONF_TRACK_ALARMS = 'track_alarms'
CONF_TRACK_DEVICES = 'track_devices'
DEVICE_TYPES = [1, 2, 3]
DEFAULT_RSSI_THRESHOLD = -70
DEVICE_CONFIG = vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_DEVICE_TYPES, default=DEVICE_TYPES):
vol.All(cv.ensure_list, [vol.In(DEVICE_TYPES)]),
vol.Optional(CONF_RSSI_THRESHOLD, default=DEFAULT_RSSI_THRESHOLD):
vol.Coerce(int),
vol.Optional(CONF_TRACK_ALARMS, default=False): cv.boolean,
vol.Optional(CONF_TRACK_DEVICES, default=True): cv.boolean,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [DEVICE_CONFIG]),
}),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
"""Set up the Google Home component."""
hass.data[DOMAIN] = {}
hass.data[CLIENT] = GoogleHomeClient(hass)
for device in config[DOMAIN][CONF_DEVICES]:
hass.data[DOMAIN][device['host']] = {}
if device[CONF_TRACK_DEVICES]:
hass.async_create_task(
discovery.async_load_platform(
hass, 'device_tracker', DOMAIN, device, config))
if device[CONF_TRACK_ALARMS]:
hass.async_create_task(
discovery.async_load_platform(
hass, 'sensor', DOMAIN, device, config))
return True
class GoogleHomeClient:
"""Handle all communication with the Google Home unit."""
def __init__(self, hass):
"""Initialize the Google Home Client."""
self.hass = hass
self._connected = None
async def update_info(self, host):
"""Update data from Google Home."""
from googledevices.api.connect import Cast
_LOGGER.debug("Updating Google Home info for %s", host)
session = async_get_clientsession(self.hass)
device_info = await Cast(host, self.hass.loop, session).info()
device_info_data = await device_info.get_device_info()
self._connected = bool(device_info_data)
self.hass.data[DOMAIN][host]['info'] = device_info_data
async def update_bluetooth(self, host):
"""Update bluetooth from Google Home."""
from googledevices.api.connect import Cast
_LOGGER.debug("Updating Google Home bluetooth for %s", host)
session = async_get_clientsession(self.hass)
bluetooth = await Cast(host, self.hass.loop, session).bluetooth()
await bluetooth.scan_for_devices()
await asyncio.sleep(5)
bluetooth_data = await bluetooth.get_scan_result()
self.hass.data[DOMAIN][host]['bluetooth'] = bluetooth_data
async def update_alarms(self, host):
"""Update alarms from Google Home."""
from googledevices.api.connect import Cast
_LOGGER.debug("Updating Google Home bluetooth for %s", host)
session = async_get_clientsession(self.hass)
assistant = await Cast(host, self.hass.loop, session).assistant()
alarms_data = await assistant.get_alarms()
self.hass.data[DOMAIN][host]['alarms'] = alarms_data
| jamespcole/home-assistant | homeassistant/components/googlehome/__init__.py | Python | apache-2.0 | 3,612 |
import sys
import copy
import time
import asyncio
import hashlib
from unittest import mock
import celery
import pytest
from waterbutler import tasks # noqa
from waterbutler.core import remote_logging
from waterbutler.core import utils as core_utils
from waterbutler.core.path import WaterButlerPath
import tests.utils as test_utils
# Hack to get the module, not the function
move = sys.modules['waterbutler.tasks.move']
FAKE_TIME = 1454684930.0
@pytest.fixture(autouse=True)
def patch_backend(monkeypatch):
monkeypatch.setattr(move.core.app, 'backend', None)
@pytest.fixture(autouse=True)
def callback(monkeypatch):
mock_request = test_utils.MockCoroutine(
return_value=mock.Mock(
status=200,
read=test_utils.MockCoroutine(
return_value=b'meowmeowmeow'
)
)
)
monkeypatch.setattr(core_utils, 'send_signed_request', mock_request)
return mock_request
@pytest.fixture
def mock_time(monkeypatch):
mock_time = mock.Mock(return_value=FAKE_TIME)
monkeypatch.setattr(time, 'time', mock_time)
@pytest.fixture
def src_path():
return WaterButlerPath('/user/bin/python')
@pytest.fixture
def dest_path():
return WaterButlerPath('/usr/bin/golang')
@pytest.fixture(scope='function')
def src_provider():
p = test_utils.MockProvider()
p.move.return_value = (test_utils.MockFileMetadata(), True)
p.auth['callback_url'] = 'src_callback'
return p
@pytest.fixture(scope='function')
def dest_provider():
p = test_utils.MockProvider()
p.move.return_value = (test_utils.MockFileMetadata(), True)
p.auth['callback_url'] = 'dest_callback'
return p
@pytest.fixture(scope='function')
def providers(monkeypatch, src_provider, dest_provider):
def make_provider(name=None, **kwargs):
if name == 'src':
return src_provider
if name == 'dest':
return dest_provider
raise ValueError('Unexpected provider')
monkeypatch.setattr(move.utils, 'make_provider', make_provider)
return src_provider, dest_provider
@pytest.fixture(autouse=True)
def log_to_keen(monkeypatch):
mock_log_to_keen = test_utils.MockCoroutine()
monkeypatch.setattr(remote_logging, 'log_to_keen', mock_log_to_keen)
return mock_log_to_keen
@pytest.fixture
def src_bundle(src_path):
return {
'nid': 'mst3k',
'path': src_path,
'provider': {
'name': 'src',
'auth': {
'callback_url': '',
},
'settings': {},
'credentials': {},
}
}
@pytest.fixture
def dest_bundle(dest_path):
return {
'nid': 'fbi4u',
'path': dest_path,
'provider': {
'name': 'dest',
'auth': {
'callback_url': '',
},
'settings': {},
'credentials': {},
}
}
@pytest.fixture
def bundles(src_bundle, dest_bundle):
return src_bundle, dest_bundle
class TestMoveTask:
def test_move_calls_move(self, event_loop, providers, bundles):
src, dest = providers
src_bundle, dest_bundle = bundles
move.move(copy.deepcopy(src_bundle), copy.deepcopy(dest_bundle))
assert src.move.called
src.move.assert_called_once_with(dest, src_bundle['path'], dest_bundle['path'])
def test_is_task(self):
assert callable(move.move)
assert isinstance(move.move, celery.Task)
assert not asyncio.iscoroutine(move.move)
assert asyncio.iscoroutinefunction(move.move.adelay)
def test_imputes_exceptions(self, event_loop, providers, bundles, callback):
src, dest = providers
src_bundle, dest_bundle = bundles
src.move.side_effect = Exception('This is a string')
with pytest.raises(Exception):
move.move(copy.deepcopy(src_bundle), copy.deepcopy(dest_bundle))
(method, url, data), _ = callback.call_args_list[0]
assert src.move.called
src.move.assert_called_once_with(dest, src_bundle['path'], dest_bundle['path'])
assert method == 'PUT'
assert data['errors'] == ["Exception('This is a string',)"]
assert url == 'dest_callback'
def test_return_values(self, event_loop, providers, bundles, callback, src_path, dest_path, mock_time):
src, dest = providers
src_bundle, dest_bundle = bundles
metadata = test_utils.MockFileMetadata()
src.move.return_value = (metadata, False)
ret1, ret2 = move.move(copy.deepcopy(src_bundle), copy.deepcopy(dest_bundle))
assert (ret1, ret2) == (metadata, False)
(method, url, data), _ = callback.call_args_list[0]
assert method == 'PUT'
assert url == 'dest_callback'
assert data['action'] == 'move'
assert data['auth'] == {'callback_url': 'dest_callback'}
assert data['email'] == False
assert data['errors'] == []
assert data['time'] == FAKE_TIME + 60
assert data['source'] == {
'nid': 'mst3k',
'resource': 'mst3k',
'path': '/' + src_path.raw_path,
'name': src_path.name,
'materialized': str(src_path),
'provider': src.NAME,
'kind': 'file',
'extra': {},
}
assert data['destination'] == {
'nid': 'fbi4u',
'resource': 'fbi4u',
'path': metadata.path,
'name': metadata.name,
'materialized': metadata.path,
'provider': dest.NAME,
'kind': 'file',
'contentType': metadata.content_type,
'etag': hashlib.sha256(
'{}::{}'.format(metadata.provider, metadata.etag)
.encode('utf-8')
).hexdigest(),
'extra': metadata.extra,
'modified': metadata.modified,
'modified_utc': metadata.modified_utc,
'created_utc': metadata.created_utc,
'size': metadata.size,
}
def test_starttime_override(self, event_loop, providers, bundles, callback, mock_time):
src, dest = providers
src_bundle, dest_bundle = bundles
stamp = FAKE_TIME
move.move(copy.deepcopy(src_bundle), copy.deepcopy(dest_bundle), start_time=stamp-100)
move.move(copy.deepcopy(src_bundle), copy.deepcopy(dest_bundle), start_time=stamp+100)
(_, _, data), _ = callback.call_args_list[0]
assert data['email'] is True
assert data['time'] == 60 + stamp
(_, _, data), _ = callback.call_args_list[1]
assert data['email'] is False
assert data['time'] == 60 + stamp
|
TomBaxter/waterbutler
|
tests/tasks/test_move.py
|
Python
|
apache-2.0
| 6,692
|
#!/usr/bin/env python
"""Tests for HTTP API."""
import json
from grr.gui import api_aff4_object_renderers
from grr.gui import api_call_renderers
from grr.gui import http_api
from grr.lib import flags
from grr.lib import registry
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import tests_pb2
class SampleGetRendererArgs(rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.SampleGetRendererArgs
class SampleGetRenderer(api_call_renderers.ApiCallRenderer):
args_type = SampleGetRendererArgs
def Render(self, args, token=None):
return {
"method": "GET",
"path": args.path,
"foo": args.foo
}
class SampleGetRendererWithAdditionalArgsArgs(rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.SampleGetRendererWithAdditionalArgsArgs
class SampleGetRendererWithAdditionalArgs(api_call_renderers.ApiCallRenderer):
args_type = SampleGetRendererWithAdditionalArgsArgs
additional_args_types = {
"AFF4Object": api_aff4_object_renderers.ApiAFF4ObjectRendererArgs,
"RDFValueCollection": (api_aff4_object_renderers.
ApiRDFValueCollectionRendererArgs)
}
def Render(self, args, token=None):
result = {
"method": "GET",
"path": args.path,
"foo": args.foo
}
if args.additional_args:
rendered_additional_args = []
for arg in args.additional_args:
rendered_additional_args.append(str(arg))
result["additional_args"] = rendered_additional_args
return result
class TestHttpRoutingInit(registry.InitHook):
def RunOnce(self):
http_api.RegisterHttpRouteHandler(
"GET", "/test_sample/<path:path>", SampleGetRenderer)
http_api.RegisterHttpRouteHandler(
"GET", "/test_sample_with_additional_args/<path:path>",
SampleGetRendererWithAdditionalArgs)
class RenderHttpResponseTest(test_lib.GRRBaseTest):
"""Test for api_call_renderers.RenderHttpResponse logic."""
def _CreateRequest(self, method, path, query_parameters=None):
if not query_parameters:
query_parameters = {}
request = utils.DataObject()
request.method = method
request.path = path
request.scheme = "http"
request.environ = {
"SERVER_NAME": "foo.bar",
"SERVER_PORT": 1234
}
request.user = "test"
if method == "GET":
request.GET = query_parameters
request.META = {}
return request
def _RenderResponse(self, request):
response = http_api.RenderHttpResponse(request)
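    # The server prepends the ")]}'" prefix (a common guard against JSON
    # hijacking) to its responses; strip it so the tests can parse the body
    # with json.loads() directly.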
if response.content.startswith(")]}'\n"):
response.content = response.content[5:]
return response
def testReturnsRendererMatchingUrlAndMethod(self):
renderer, _ = http_api.GetRendererForHttpRequest(
self._CreateRequest("GET", "/test_sample/some/path"))
self.assertTrue(isinstance(renderer, SampleGetRenderer))
def testPathParamsAreReturnedWithMatchingRenderer(self):
_, path_params = http_api.GetRendererForHttpRequest(
self._CreateRequest("GET", "/test_sample/some/path"))
self.assertEqual(path_params, {"path": "some/path"})
def testRaisesIfNoRendererMatchesUrl(self):
self.assertRaises(api_call_renderers.ApiCallRendererNotFoundError,
http_api.GetRendererForHttpRequest,
self._CreateRequest("GET",
"/some/missing/path"))
def testRendersGetRendererCorrectly(self):
response = self._RenderResponse(
self._CreateRequest("GET", "/test_sample/some/path"))
self.assertEqual(
json.loads(response.content),
{"method": "GET",
"path": "some/path",
"foo": ""})
self.assertEqual(response.status_code, 200)
def testQueryParamsArePassedIntoRendererArgs(self):
response = self._RenderResponse(
self._CreateRequest("GET", "/test_sample/some/path",
query_parameters={"foo": "bar"}))
self.assertEqual(
json.loads(response.content),
{"method": "GET",
"path": "some/path",
"foo": "bar"})
def testRouteArgumentTakesPrecedenceOverQueryParams(self):
response = self._RenderResponse(
self._CreateRequest("GET", "/test_sample/some/path",
query_parameters={"path": "foobar"}))
self.assertEqual(
json.loads(response.content),
{"method": "GET",
"path": "some/path",
"foo": ""})
def testAdditionalArgumentsAreParsedCorrectly(self):
additional_args = http_api.FillAdditionalArgsFromRequest(
{
"AFF4Object.limit_lists": "10",
"RDFValueCollection.with_total_count": "1"
}, {
"AFF4Object": api_aff4_object_renderers.ApiAFF4ObjectRendererArgs,
"RDFValueCollection":
api_aff4_object_renderers.ApiRDFValueCollectionRendererArgs
})
additional_args = sorted(additional_args, key=lambda x: x.name)
self.assertListEqual(
[x.name for x in additional_args],
["AFF4Object", "RDFValueCollection"])
self.assertListEqual(
[x.type for x in additional_args],
["ApiAFF4ObjectRendererArgs", "ApiRDFValueCollectionRendererArgs"])
self.assertListEqual(
[x.args for x in additional_args],
[api_aff4_object_renderers.ApiAFF4ObjectRendererArgs(limit_lists=10),
api_aff4_object_renderers.ApiRDFValueCollectionRendererArgs(
with_total_count=True)])
def testAdditionalArgumentsAreFoundAndPassedToTheRenderer(self):
response = self._RenderResponse(
self._CreateRequest("GET",
"/test_sample_with_additional_args/some/path",
query_parameters={"foo": "42"}))
self.assertEqual(
json.loads(response.content),
{"method": "GET",
"path": "some/path",
"foo": "42"})
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
pchaigno/grr
|
gui/http_api_test.py
|
Python
|
apache-2.0
| 6,014
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Arithmetic Operators
TensorFlow provides several operations that you can use to add basic arithmetic
operators to your graph.
@@add
@@sub
@@mul
@@div
@@truediv
@@floordiv
@@mod
@@cross
## Basic Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions to your graph.
@@add_n
@@abs
@@neg
@@sign
@@inv
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@log
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
## Matrix Math Functions
TensorFlow provides several operations that you can use to add linear algebra
functions on matrices to your graph.
@@batch_matrix_diag
@@batch_matrix_diag_part
@@batch_matrix_band_part
@@diag
@@diag_part
@@trace
@@transpose
@@matmul
@@batch_matmul
@@matrix_determinant
@@batch_matrix_determinant
@@matrix_inverse
@@batch_matrix_inverse
@@cholesky
@@batch_cholesky
@@cholesky_solve
@@batch_cholesky_solve
@@self_adjoint_eig
@@batch_self_adjoint_eig
@@matrix_solve
@@batch_matrix_solve
@@matrix_triangular_solve
@@batch_matrix_triangular_solve
@@matrix_solve_ls
@@batch_matrix_solve_ls
## Complex Number Functions
TensorFlow provides several operations that you can use to add complex number
functions to your graph.
@@complex
@@complex_abs
@@conj
@@imag
@@real
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
@@batch_fft
@@batch_ifft
@@batch_fft2d
@@batch_ifft2d
@@batch_fft3d
@@batch_ifft3d
## Reduction
TensorFlow provides several operations that you can use to perform
common math computations that reduce various dimensions of a tensor.
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@accumulate_n
## Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
==> [[0 0 0 0]
[5 6 7 8]]
```
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
## Sequence Comparison and Indexing
TensorFlow provides several operations that you can use to add sequence
comparison and index extraction to your graph. You can use these operations to
determine sequence differences and determine the indexes of specific values in
a tensor.
@@argmin
@@argmax
@@listdiff
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six.moves
from tensorflow.python.client import graph_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
# Aliases for some automatically-generated names.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
# pylint: disable=anomalous-backslash-in-string,protected-access
def abs(x, name=None):
"""Computes the absolute value of a tensor.
Given a tensor of real numbers `x`, this operation returns a tensor
containing the absolute value of each element in `x`. For example, if x is
an input element and y is an output element, this operation computes
\\\\(y = |x|\\\\).
See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex
number.
Args:
x: A `Tensor` of type `float`, `double`, `int32`, or `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same size and type as `x` with absolute values.
"""
with ops.op_scope([x], name, "Abs") as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype in (dtypes.complex64, dtypes.complex128):
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name)
def complex_abs(x, name=None):
r"""Computes the complex absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float` or `double` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The
absolute value is computed as \\( \sqrt{a^2 + b^2}\\).
For example:
```
# tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
tf.complex_abs(x) ==> [5.25594902, 6.60492229]
```
Args:
x: A `Tensor` of type `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
def scalar_mul(scalar, x):
"""Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
Intended for use in gradient code which might deal with `IndexedSlices`
objects, which are easy to multiply by a scalar but more expensive to
multiply with arbitrary tensors.
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
ValueError: if scalar is not a 0-D `scalar`.
"""
scalar = ops.convert_to_tensor(scalar, dtype=x.dtype, name="scalar")
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
else:
return scalar * x
else:
raise ValueError("Only scalar multiply works, got shape %s" % shape)
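# A hedged usage sketch for scalar_mul above (values are hypothetical, not from
# the original file). Scaling an IndexedSlices only multiplies its values and
# keeps the indices and dense_shape, which is why gradient code prefers this
# helper over a general tensor multiply:
#   # 'slices' has values [[1., 2.]], indices [3], dense_shape [10, 2]
#   scalar_mul(0.5, slices) ==> IndexedSlices(values=[[0.5, 1.]],
#                                             indices=[3], dense_shape=[10, 2])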
def pow(x, y, name=None):
"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for
corresponding elements in `x` and `y`. For example:
```
# tensor 'x' is [[2, 2], [3, 3]]
# tensor 'y' is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
y: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.op_scope([x], name, "Pow") as name:
return gen_math_ops._pow(x, y, name=name)
def complex(real, imag, name=None):
"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64` or `complex128`.
"""
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
with ops.op_scope([real, imag], name, "Complex") as name:
input_types = (real.dtype, imag.dtype)
if input_types == (dtypes.float64, dtypes.float64):
Tout = dtypes.complex128
elif input_types == (dtypes.float32, dtypes.float32):
Tout = dtypes.complex64
else:
raise TypeError("Types of real and imag don't match: "
"{} {}".format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
def real(input, name=None):
"""Returns the real part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` or `double` that is the real part of each element in `input`.
All elements in `input` must be complex numbers of the form \\(a + bj\\),
where *a* is the real part returned by this operation and *b* is the
imaginary part.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`,
`complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float` or `double`.
"""
with ops.op_scope([input], name, "Real") as name:
return gen_math_ops.real(input, Tout=input.dtype.real_dtype, name=name)
def imag(input, name=None):
"""Returns the imaginary part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` or `double` that is the imaginary part of each element in
`input`. All elements in `input` must be complex numbers of the form \\(a +
bj\\), where *a* is the real part and *b* is the imaginary part returned by
this operation.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.imag(input) ==> [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float` or `double`.
"""
with ops.op_scope([input], name, "Imag") as name:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
def round(x, name=None):
"""Rounds the values of a tensor to the nearest integer, element-wise.
For example:
```python
# 'a' is [0.9, 2.5, 2.3, -4.4]
tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ]
```
Args:
x: A `Tensor` of type `float` or `double`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
return gen_math_ops.floor(x + 0.5, name=name)
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor`) to `dtype`.
For example:
```python
# tensor `a` is [1.8, 2.2], dtype=tf.float
tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32
```
Args:
x: A `Tensor` or `SparseTensor`.
dtype: The destination type.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
"""
with ops.op_scope([x], name, "Cast") as name:
if isinstance(x, ops.SparseTensor):
values_cast = cast(x.values, dtype, name=name)
return ops.SparseTensor(x.indices, values_cast, x.shape)
else:
# TODO(touts): Handle what Josh said.
#
# Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype == dtype:
return x
return gen_math_ops.cast(x, dtype, name=name)
def saturate_cast(value, dtype, name=None):
"""Performs a safe saturating cast of `value` to `dtype`.
This function casts the input to `dtype` without applying any scaling. If
there is a danger that values would over or underflow in the cast, this op
applies the appropriate clamping before the cast.
Args:
value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
Returns:
`value` safely cast to `dtype`.
"""
# When casting to a type with smaller representable range, clamp.
# Note that this covers casting to unsigned types as well.
with ops.op_scope([value], name, "saturate_cast") as name:
value = ops.convert_to_tensor(value, name="value")
dtype = dtypes.as_dtype(dtype).base_dtype
if value.dtype.min < dtype.min:
value = gen_math_ops.maximum(value, ops.convert_to_tensor(
dtype.min, dtype=value.dtype, name="min"))
if value.dtype.max > dtype.max:
value = gen_math_ops.minimum(value, ops.convert_to_tensor(
dtype.max, dtype=value.dtype, name="max"))
return cast(value, dtype, name=name)
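# A hedged illustration of the clamping behaviour documented above (input
# values are hypothetical). Casting float32 values to uint8 first clamps them
# to uint8's representable range instead of letting them wrap around:
#   # tensor 'v' is [-3.0, 130.0, 300.0], dtype=float32
#   saturate_cast(v, dtypes.uint8) ==> [0, 130, 255]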
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to the `bfloat16`.
"""
return cast(x, dtypes.bfloat16, name=name)
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# that ~ marks an elementwise bit-wise inverse. This is only implemented for
# boolean tensors and will raise a TypeError if used on non-boolean arrays.
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
"""Register operators with different tensor and scalar versions.
If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.
Args:
func: the operator
op_name: name of the operator being overridden
clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
"""
def binary_op_wrapper(x, y):
with ops.op_scope([x, y], None, op_name) as name:
if not isinstance(y, ops.SparseTensor):
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
def binary_op_wrapper_sparse(sp_x, y):
with ops.op_scope([sp_x, y], None, op_name) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
return ops.SparseTensor(sp_x.indices, func(sp_x.indices, sp_x.values,
sp_x.shape, y, name=name),
sp_x.shape)
def r_binary_op_wrapper(y, x):
with ops.op_scope([x, y], None, op_name) as name:
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
if clazz_object is ops.Tensor:
clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
else:
clazz_object._override_operator("__%s__" % op_name,
binary_op_wrapper_sparse)
del binary_op_wrapper_sparse
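# Hedged illustration of what the registrations below set up (names as in this
# file): after _OverrideBinaryOperatorHelper(gen_math_ops.add, "add") runs,
# writing `x + y` on Tensors calls gen_math_ops.add(x, y) with `y` converted to
# x's base dtype, and `1 + x` goes through the reflected __radd__ wrapper,
# which converts the scalar to the tensor's dtype before calling the same op.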
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
dtypes.uint8: dtypes.float32,
dtypes.int8: dtypes.float32,
dtypes.int16: dtypes.float32,
dtypes.int32: dtypes.float64,
dtypes.int64: dtypes.float64,
dtypes.float16: None,
dtypes.float32: None,
dtypes.float64: None,
dtypes.complex64: None,
dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
"""Internal helper function for 'sp_t / dense_t'."""
with ops.op_scope([sp_indices, sp_values, sp_shape, y],
name, "truediv") as name:
sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
y = ops.convert_to_tensor(y, name="y")
x_dtype = sp_values.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
sp_values = cast(sp_values, dtype)
y = cast(y, dtype)
return gen_sparse_ops.sparse_dense_cwise_div(sp_indices, sp_values,
sp_shape, y, name=name)
def truediv(x, y, name=None):
"""Divides x / y elementwise, always producing floating point results.
The same as `tf.div` for floating point arguments, but casts integer arguments
to floating point before dividing so that the result is always floating point.
This op is generated by normal `x / y` division in Python 3 and in Python 2.7
with `from __future__ import division`. If you want integer division that
rounds down, use `x // y` or `tf.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
with ops.op_scope([x, y], name, "truediv") as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return gen_math_ops.div(x, y, name=name)
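# Worked example of the promotion rule in the docstring above (values are
# hypothetical). int32 operands are promoted to float64 via _TRUEDIV_TABLE
# before the division:
#   # tensor 'x' is [3, 4], dtype=int32
#   # tensor 'y' is [2, 2], dtype=int32
#   truediv(x, y) ==> [1.5, 2.0]  # dtype=float64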
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding down for floating point.
The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
floating point arguments so that the result is always an integer (though
possibly an integer represented as floating point). This op is generated by
`x // y` floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
Note that for efficiency, `floordiv` uses C semantics for negative numbers
(unlike Python and Numpy).
`x` and `y` must have the same type, and the result will have the same type
as well.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down (except possibly towards zero for negative integers).
Raises:
TypeError: If the inputs are complex.
"""
with ops.op_scope([x, y], name, "floordiv") as name:
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
if dtype.is_floating:
return gen_math_ops.floor(gen_math_ops.div(x, y), name=name)
else:
if not dtype.is_integer:
raise TypeError("Expected floating point or integer, got %r" % dtype)
return gen_math_ops.div(x, y, name=name)
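# Hedged examples of the "C semantics" note above (values are hypothetical).
# For integer inputs the underlying div truncates towards zero, while floating
# point inputs go through an explicit floor:
#   floordiv(7, 2)       ==> 3
#   floordiv(-7, 2)      ==> -3    # Python's -7 // 2 would give -4
#   floordiv(-7.0, 2.0)  ==> -4.0  # floor(-3.5)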
def _mul_dispatch(x, y, name=None):
"""Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
is_tensor_y = isinstance(y, ops.Tensor)
if is_tensor_y:
return gen_math_ops.mul(x, y, name=name)
else:
assert isinstance(y, ops.SparseTensor) # Case: Dense * Sparse.
new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
y.shape, x, name)
return ops.SparseTensor(y.indices, new_vals, y.shape)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
ops.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
ops.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
ops.SparseTensor)
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(gen_math_ops.div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops.mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return gen_math_ops.logical_and(
gen_math_ops.logical_or(x, y),
gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
name=name)
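# Truth table for the xor helper above (boolean tensors only, values are
# hypothetical):
#   # 'x' is [True, True, False, False]
#   # 'y' is [True, False, True, False]
#   logical_xor(x, y) ==> [False, True, True, False]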
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, name="range"):
"""Creates a sequence of integers.
Creates a sequence of integers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
```
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
# 'limit' is 5
tf.range(limit) ==> [0, 1, 2, 3, 4]
```
Args:
    start: A 0-D `Tensor` (scalar) of type `int32`. First entry in the
      sequence. Defaults to 0.
    limit: A 0-D `Tensor` (scalar) of type `int32`. Upper limit of the
      sequence, exclusive.
    delta: A 0-D `Tensor` (scalar) of type `int32`. Optional. Defaults to 1.
      The increment between consecutive entries of the sequence.
name: A name for the operation (optional).
Returns:
    A 1-D `int32` `Tensor`.
"""
if limit is None:
start, limit = 0, start
return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
start_value = tensor_util.constant_value(op.inputs[0])
limit_value = tensor_util.constant_value(op.inputs[1])
delta_value = tensor_util.constant_value(op.inputs[2])
if start_value is None or limit_value is None or delta_value is None:
return [tensor_shape.vector(None)]
else:
return [tensor_shape.vector((limit_value - start_value + delta_value - 1) //
delta_value)]
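# Worked shape example for the formula above (constants are hypothetical): with
# start=3, limit=18, delta=3 the op produces [3, 6, 9, 12, 15], and
#   (limit - start + delta - 1) // delta = (18 - 3 + 3 - 1) // 3 = 17 // 3 = 5,
# which matches the five elements of the output vector.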
# Reduction operations
def _ReductionDims(x, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
if reduction_indices is not None:
return reduction_indices
else:
# TODO(zongheng): remove this once rank() supports SparseTensor.
if isinstance(x, ops.SparseTensor):
return range(0, array_ops.size(x.shape))
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1, 1, 1]
# [1, 1, 1]]
tf.reduce_sum(x) ==> 6
tf.reduce_sum(x, 0) ==> [2, 2, 2]
tf.reduce_sum(x, 1) ==> [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
tf.reduce_sum(x, [0, 1]) ==> 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1., 1.]
# [2., 2.]]
tf.reduce_mean(x) ==> 1.5
tf.reduce_mean(x, 0) ==> [1.5, 1.5]
tf.reduce_mean(x, 1) ==> [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]
# [False, False]]
tf.reduce_all(x) ==> False
tf.reduce_all(x, 0) ==> [False, False]
tf.reduce_all(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]
# [False, False]]
tf.reduce_any(x) ==> True
tf.reduce_any(x, 0) ==> [True, True]
tf.reduce_any(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def trace(x, name=None):
""" Compute the trace of a tensor `x`.
`trace(x)` returns the sum of along the diagonal.
For example:
```python
# 'x' is [[1, 1],
# [1, 1]]
tf.trace(x) ==> 2
# 'x' is [[1,2,3],
# [4,5,6],
# [7,8,9]]
tf.trace(x) ==> 15
```
Args:
x: 2-D tensor.
name: A name for the operation (optional).
Returns:
The trace of input tensor.
"""
with ops.op_scope([x], name, "Trace") as name:
x = ops.convert_to_tensor(x, name="x")
if len(x.get_shape()) != 2:
raise ValueError("Expected a tensor with rank 2, rank %d tensor received"
% len(x.get_shape()))
return reduce_sum(array_ops.diag_part(x), name=name)
def matmul(a, b,
transpose_a=False, transpose_b=False,
a_is_sparse=False, b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must be two-dimensional matrices, with matching inner dimensions,
possibly after transposition.
Both matrices must be of the same type. The supported types are:
`float`, `double`, `int32`, `complex64`.
Either matrix can be transposed on the fly by setting the corresponding flag
to `True`. This is `False` by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
For example:
```python
# 2-D tensor `a`
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
[4. 5. 6.]]
# 2-D tensor `b`
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
[9. 10.]
[11. 12.]]
c = tf.matmul(a, b) => [[58 64]
[139 154]]
```
Args:
a: `Tensor` of type `float`, `double`, `int32` or `complex64`.
b: `Tensor` with same type as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a`.
"""
with ops.op_scope([a, b], name, "MatMul") as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
use_sparse_matmul = (a.dtype in sparse_matmul_types and
b.dtype in sparse_matmul_types and
(a_is_sparse or b_is_sparse))
if dtypes.bfloat16 in (a.dtype, b.dtype):
# matmul currently doesn't handle bfloat16 inputs.
use_sparse_matmul = True
if use_sparse_matmul:
return sparse_matmul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
else:
return gen_math_ops._mat_mul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
name=name)
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
ops.RegisterShape("MatMul")(common_shapes.matmul_shape)
ops.RegisterShape("SparseMatMul")(common_shapes.matmul_shape)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for MatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[0])
else:
k = int(a_shape[1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))
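# Hedged worked example for the FLOPs estimate above (shapes are hypothetical):
# for a non-transposed 'a' of shape [128, 64] and an output of shape [128, 32],
#   k = 64, output_count = 128 * 32 = 4096, flops = 64 * 4096 * 2 = 524288,
# i.e. one multiply and one add per inner-dimension element of every output.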
@ops.RegisterStatistics("MatMul", "weight_parameters")
def _calc_mat_mul_weight_parameters(graph, node):
"""Calculates the on-disk size of the weights for MatMul."""
# We assume here that the weights are always in the second input to the op,
# which is generally true by convention for fully-connected layers, but not
# enforced or checked.
weights_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
weights_shape.assert_is_fully_defined()
return ops.OpStats("weight_parameters",
(int(weights_shape[1]) * int(weights_shape[0])))
def _as_indexed_slices(x):
"""Convert 'x' to IndexedSlices.
Convert a dense Tensor to a block-sparse IndexedSlices.
Args:
x: Either a Tensor object, or an IndexedSlices object.
Returns:
An IndexedSlices object.
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape(x)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i) for i in inputs]
with_int32_index = [o.indices for o in outputs
if o.indices.dtype == dtypes.int32]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
  Optionally, pass `shape` and `tensor_dtype` for shape and type checking;
  otherwise, these are inferred.
For example:
```python
# tensor 'a' is [[1, 2], [3, 4]]
# tensor `b` is [[5, 0], [0, 6]]
tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
==> [[7, 4], [6, 14]]
```
Args:
    inputs: A list of `Tensor` objects, each with the same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
    ValueError: If `inputs` don't all have the same shape and dtype, or if the
      shape cannot be inferred.
"""
if tensor_dtype is None:
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
tensor_dtype = inputs[0].dtype
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
if not shape.is_fully_defined():
# TODO(pbar): Make a version of assign_add that accepts an uninitialized
# lvalue, and takes its shape from that? This would allow accumulate_n to
# work in all situations that add_n currently works.
raise ValueError("Cannot infer the shape of the accumulator for "
"accumulate_n. Pass the shape argument, or set the shape "
"of at least one of the inputs.")
with ops.op_scope(inputs, name, "AccumulateN") as name:
if len(inputs) == 1:
return inputs[0]
var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
var_name = var.op.name
var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
update_ops = []
for input_tensor in inputs:
op = state_ops.assign_add(var, input_tensor, use_locking=True)
update_ops.append(op)
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(var,
var_name=var_name,
name=name)
@ops.RegisterShape("BatchMatMul")
def _BatchMatMulShape(op):
"""Shape function for BatchMatMul op."""
a_shape = op.inputs[0].get_shape()
adj_a = op.get_attr("adj_x")
b_shape = op.inputs[1].get_shape()
adj_b = op.get_attr("adj_y")
if a_shape.dims is None and b_shape.dims is None:
return [tensor_shape.unknown_shape()]
batch_dims = a_shape[:-2].merge_with(b_shape[:-2])
output_rows = a_shape[-1] if adj_a else a_shape[-2]
output_cols = b_shape[-2] if adj_b else b_shape[-1]
inner_a = a_shape[-2] if adj_a else a_shape[-1]
inner_b = b_shape[-1] if adj_b else b_shape[-2]
inner_a.assert_is_compatible_with(inner_b)
return [batch_dims.concatenate([output_rows, output_cols])]
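# Shape example for the rule above (shapes are hypothetical): with
# a_shape = [7, 2, 3], b_shape = [7, 3, 5] and adj_x = adj_y = False,
# batch_dims = [7], output_rows = 2 and output_cols = 5, so the inferred output
# shape is [7, 2, 5]; the inner dimensions (3 and 3) must be compatible.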
def sigmoid(x, name=None):
"""Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
Args:
x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`; otherwise the
      return type is `quint8`.
"""
with ops.op_scope([x], name, "Sigmoid") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._sigmoid(x, name=name)
def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
Args:
x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`; otherwise the
      return type is `quint8`.
"""
with ops.op_scope([x], name, "Tanh") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._tanh(x, name=name)
ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Cross")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Lgamma")(common_shapes.unchanged_shape)
ops.RegisterShape("Digamma")(common_shapes.unchanged_shape)
ops.RegisterShape("Erf")(common_shapes.unchanged_shape)
ops.RegisterShape("Erfc")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
ops.RegisterShape("FFT")(common_shapes.unchanged_shape)
ops.RegisterShape("IFFT")(common_shapes.unchanged_shape)
ops.RegisterShape("FFT2D")(common_shapes.unchanged_shape)
ops.RegisterShape("IFFT2D")(common_shapes.unchanged_shape)
ops.RegisterShape("FFT3D")(common_shapes.unchanged_shape)
ops.RegisterShape("IFFT3D")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchFFT")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchIFFT")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchFFT2D")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchIFFT2D")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchFFT3D")(common_shapes.unchanged_shape)
ops.RegisterShape("BatchIFFT3D")(common_shapes.unchanged_shape)
@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Igamma")
@ops.RegisterShape("Igammac")
@ops.RegisterShape("Zeta")
@ops.RegisterShape("Polygamma")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
@ops.RegisterShape("Pow")
@ops.RegisterShape("Sub")
@ops.RegisterShape("SquaredDifference")
def _BroadcastShape(op):
"""Common shape function for binary operators that broadcast their inputs."""
shape_x = op.inputs[0].get_shape()
shape_y = op.inputs[1].get_shape()
if shape_x.ndims is None or shape_y.ndims is None:
return [tensor_shape.unknown_shape()]
# To compute the broadcasted dimensions, we zip together shape_x and shape_y,
# and pad with 1 to make them the same length.
broadcasted_dims = reversed(list(six.moves.zip_longest(
reversed(shape_x.dims),
reversed(shape_y.dims),
fillvalue=tensor_shape.Dimension(1))))
# Next we combine the dimensions according to the numpy broadcasting rules.
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
return_dims = []
for (dim_x, dim_y) in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
# One or both dimensions is unknown. If either dimension is greater than
# 1, we assume that the program is correct, and the other dimension will
# be broadcast to match it.
# TODO(mrry): If we eliminate the shape checks in C++, we must still
# assert that the unknown dim is either 1 or the same as the known dim.
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
# We will broadcast dim_x to dim_y.
return_dims.append(dim_y)
elif dim_y.value == 1:
# We will broadcast dim_y to dim_x.
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
# The dimensions are compatible, so output is the same size in that
# dimension.
return_dims.append(dim_x.merge_with(dim_y))
else:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return [tensor_shape.TensorShape(return_dims)]
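# Worked example of the broadcasting rule implemented above (shapes are
# hypothetical): shape_x = [2, 1, 5] and shape_y = [3, 1] are right-aligned and
# padded with 1s to [2, 1, 5] and [1, 3, 1]; combining dimension by dimension
# yields [2, 3, 5]. Two known dimensions that differ and are both greater than
# 1 (e.g. 2 vs 3 in the same position) raise the ValueError above.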
@ops.RegisterShape("SparseDenseCwiseMul")
@ops.RegisterShape("SparseDenseCwiseDiv")
@ops.RegisterShape("SparseDenseCwiseAdd")
def _SparseDenseBinaryOpShape(op): # pylint: disable=invalid-name
"""Common shape for 'sparse <binary cwise op> dense -> sparse' operators."""
nnz = op.inputs[1].get_shape()[0]
return [tensor_shape.TensorShape(nnz)]
@ops.RegisterShape("AddN")
def _AddNShape(op):
merged_shape = tensor_shape.unknown_shape()
for input_ in op.inputs:
merged_shape = merged_shape.merge_with(input_.get_shape())
return [merged_shape]
@ops.RegisterShape("Select")
def _SelectShape(op):
"""Shape function for SelectOp."""
# The inputs 'then' and 'else' must have the same shape.
# The input 'cond' must either have the same shape as 'then' and
# 'else', or be a vector if 'then' and 'else' are at least vectors.
c_shape = op.inputs[0].get_shape()
t_shape = op.inputs[1].get_shape()
e_shape = op.inputs[2].get_shape()
t_e_shape = t_shape.merge_with(e_shape)
c_shape_list = c_shape.as_list() if c_shape.ndims is not None else None
t_e_shape_list = t_e_shape.as_list() if t_e_shape.ndims is not None else None
if c_shape_list is not None and t_e_shape_list is not None:
if len(c_shape_list) != 1:
# If the rank of 'cond' is != 1, the shape must match 'then' and 'else'
t_e_shape = t_e_shape.merge_with(c_shape)
if t_e_shape_list:
      # If 'then' and 'else' are not scalars, then 'cond' must be at least
      # a vector, and its first dimension must match the first dimension of
      # 'then' and 'else'.
c_shape = c_shape.with_rank_at_least(1)
if len(c_shape.as_list()) == 1:
c_shape.merge_with(tensor_shape.vector(t_e_shape_list[0]))
return [t_e_shape]
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
"""Common shape function for arg-reduction ops."""
dimension_shape = op.inputs[1].get_shape()
dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()]
elif input_shape.ndims <= 1:
return [tensor_shape.scalar()]
dimension = tensor_util.constant_value(op.inputs[1])
if dimension is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
  elif 0 <= dimension < input_shape.ndims:
returned_shape = []
for i, dim in enumerate(input_shape.dims):
if i != dimension:
returned_shape.append(dim)
return [tensor_shape.TensorShape(returned_shape)]
else:
raise ValueError(
"dimension (%d) must be in the range [0, %d), where %d is the number "
"of dimensions in the input"
% (dimension, input_shape.ndims, input_shape.ndims))
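# Example for the arg-reduction shape rule above (shapes are hypothetical):
# with an input of shape [2, 3, 5] and dimension = 1 the returned shape is
# [2, 5]; rank-0 and rank-1 inputs produce a scalar output.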
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
"""Common shape function for reduction ops."""
input_shape = op.inputs[0].get_shape()
reduction_indices = tensor_util.constant_value(op.inputs[1])
keep_dims = op.get_attr("keep_dims")
if reduction_indices is None or input_shape.ndims is None:
if keep_dims:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
else:
return [tensor_shape.unknown_shape()]
# Turn reduction_indices from scalar to vector if necessary
reduction_indices = np.ravel(reduction_indices)
for reduction_index in reduction_indices:
if (reduction_index < -input_shape.ndims or
reduction_index >= input_shape.ndims):
raise ValueError("Invalid reduction dimension %d for input with %d "
"dimensions" % (reduction_index, input_shape.ndims))
reduction_indices = set([(x + input_shape.ndims) % input_shape.ndims
for x in reduction_indices])
returned_dims = []
if keep_dims:
for i, dim in enumerate(input_shape.dims):
if i in reduction_indices:
returned_dims.append(1)
else:
returned_dims.append(dim)
else:
for i, dim in enumerate(input_shape.dims):
if i not in reduction_indices:
returned_dims.append(dim)
return [tensor_shape.TensorShape(returned_dims)]
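# Shape example for the reduction rule above (shapes are hypothetical): for an
# input of shape [2, 3, 5] with reduction_indices = [1],
#   keep_dims=False ==> output shape [2, 5]
#   keep_dims=True  ==> output shape [2, 1, 5]
# A negative index such as -1 is normalized to 2 by the modulo step above.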
@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
"""Common shape function for segment reduction ops."""
data_shape = op.inputs[0].get_shape()
segment_ids_shape = op.inputs[1].get_shape()
segment_ids_shape.assert_has_rank(1)
return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMean")
@ops.RegisterShape("SparseSegmentSqrtN")
@ops.RegisterShape("SparseSegmentSum")
def _SparseSegmentReductionShape(op):
"""Common shape function for sparse segment reduction ops."""
data_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape()
indices_shape.assert_has_rank(1)
segment_ids_shape = op.inputs[2].get_shape()
segment_ids_shape.assert_has_rank(1)
indices_shape.assert_is_compatible_with(segment_ids_shape)
return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMeanGrad")
@ops.RegisterShape("SparseSegmentSqrtNGrad")
# pylint: disable=invalid-name
def _SparseSegmentReductionGradShape(op):
"""Shape function for the SparseSegment[Mean|SqrtN]Grad ops."""
input_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape().with_rank(1)
unused_segment_ids_shape = op.inputs[2].get_shape().merge_with(indices_shape)
unused_output_dim0_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.scalar())
dim0 = tensor_util.constant_value(op.inputs[3])
return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])]
# pylint: enable=invalid-name
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
"""Shape function for UnsortedSegmentSum."""
data_shape = op.inputs[0].get_shape()
segment_ids_shape = op.inputs[1].get_shape()
mid = segment_ids_shape.ndims
if mid is None:
return [tensor_shape.unknown_shape()]
else:
num_segments = tensor_util.constant_value(op.inputs[2])
return [tensor_shape.TensorShape([num_segments]).concatenate(
data_shape[mid:])]
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
num = tensor_util.constant_value(op.inputs[2])
return [tensor_shape.vector(num)]
def reduced_shape(input_shape, axes):
"""Helper function for reduction ops.
Args:
input_shape: 1-D Tensor, the shape of the Tensor being reduced.
axes: 1-D Tensor, the reduction axes.
Returns:
A 1-D Tensor, the output shape as if keep_dims were set to True.
"""
# Example:
# cast needed for SparseTensor reductions
input_shape = to_int32(input_shape) # [2, 3, 5, 7]
axes = to_int32(axes) # [1, 2]
input_rank = array_ops.size(input_shape) # 4
axes = (axes + input_rank) % input_rank
axes_shape = array_ops.shape(axes) # [2]
return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]
[range(input_rank), # [0, 1, 2, 3]
axes], # [1, 2]
[input_shape, # [2, 3, 5, 7]
array_ops.fill(axes_shape, 1)]) # [1, 1]
|
sachinpro/sachinpro.github.io
|
tensorflow/python/ops/math_ops.py
|
Python
|
apache-2.0
| 58,305
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# load and run the onnx model exported from pytorch
# https://github.com/onnx/tutorials/blob/master/tutorials/PytorchOnnxExport.ipynb
import argparse
from singa import device
from singa import sonnx
from singa import tensor
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Load model from pytorch")
parser.add_argument("--use_cpu", action="store_true")
args = parser.parse_args()
if args.use_cpu:
print("Using CPU")
dev = device.get_default_device()
else:
print("Using GPU")
dev = device.create_cuda_gpu()
model = sonnx.load("alexnet.onnx")
backend = sonnx.prepare(model, dev)
input_name = model.graph.inputs[0].name
inputs = tensor.Tensor(shape=(2, 3, 224, 224), device=dev, name=input_name)
inputs.gaussian(0, 0.01)
y = backend.run([inputs])[0]
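# Possible follow-up, sketched as an assumption rather than taken from the
# original example: the output tensor `y` could be copied back to host memory
# (e.g. with singa's tensor.to_numpy helper, if available in this version) and
# an argmax taken over the class dimension to obtain predicted labels.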
|
kaiping/incubator-singa
|
examples/onnx/backend.py
|
Python
|
apache-2.0
| 1,642
|
# -*- coding: utf-8 -*-
"""
taiga_ncurses.ui.views.backlog
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import urwid
from taiga_ncurses.ui.widgets import generic, backlog
from . import base
class ProjectBacklogSubView(base.SubView):
help_popup_title = "Backlog Help Info"
help_popup_info = base.SubView.help_popup_info + (
( "Backlog Movements:", (
("↑ | k | ctrl p", "Move Up"),
("↓ | j | ctrl n", "Move Down"),
("← | h | ctrl b", "Move Left"),
("→ | l | ctrl f", "Move Right"),
)),
( "User Stories Actions:", (
("n", "Create new US"),
("N", "Create new USs in bulk"),
("e", "Edit selected US"),
("Supr", "Delete selected US"),
("K", "Move selected US up"),
("J", "Move selected US down"),
("w", "Save the position of all USs"),
("m", "Move selected US to a Milestone"),
("r", "Refresh the screen")
)),
)
def __init__(self, parent_view, project, notifier, tabs):
super().__init__(parent_view)
self.project = project
self.notifier = notifier
self.stats = backlog.BacklogStats(project)
self.user_stories = backlog.UserStoryList(project)
list_walker = urwid.SimpleFocusListWalker([
tabs,
generic.box_solid_fill(" ", 1),
self.stats,
generic.box_solid_fill(" ", 1),
self.user_stories
])
list_walker.set_focus(4)
self.widget = urwid.ListBox(list_walker)
def open_user_story_form(self, user_story={}):
self.user_story_form = backlog.UserStoryForm(self.project, user_story=user_story)
# FIXME: Calculate the form size
self.parent.show_widget_on_top(self.user_story_form, 80, 24)
def close_user_story_form(self):
del self.user_story_form
self.parent.hide_widget_on_top()
def get_user_story_form_data(self):
data = {}
if hasattr(self, "user_story_form"):
data.update({
"subject": self.user_story_form.subject,
"milestone": self.user_story_form.milestone,
"points": self.user_story_form.points,
"status": self.user_story_form.status,
"is_blocked": self.user_story_form.is_blocked,
"blocked_note": self.user_story_form.blocked_note,
"tags": self.user_story_form.tags,
"description": self.user_story_form.description,
"team_requirement": self.user_story_form.team_requirement,
"client_requirement": self.user_story_form.client_requirement,
"project": self.project["id"],
})
return data
def open_user_stories_in_bulk_form(self):
self.user_stories_in_bulk_form = backlog.UserStoriesInBulkForm(self.project)
# FIXME: Calculate the form size
self.parent.show_widget_on_top(self.user_stories_in_bulk_form, 80, 24)
def close_user_stories_in_bulk_form(self):
del self.user_stories_in_bulk_form
self.parent.hide_widget_on_top()
def get_user_stories_in_bulk_form_data(self):
data = {}
if hasattr(self, "user_stories_in_bulk_form"):
data.update({
"bulkStories": self.user_stories_in_bulk_form.subjects,
"projectId": self.project["id"],
})
return data
def open_milestones_selector_popup(self, user_story={}):
self.milestone_selector_popup = backlog.MIlestoneSelectorPopup(self.project, user_story)
# FIXME: Calculate the popup size
self.parent.show_widget_on_top(self.milestone_selector_popup, 100, 30)
def close_milestone_selector_popup(self):
del self.milestone_selector_popup
self.parent.hide_widget_on_top()
|
battlemidget/taiga-ncurses
|
taiga_ncurses/ui/views/backlog.py
|
Python
|
apache-2.0
| 3,880
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from ironic.common import driver_factory
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers import base as driver_base
from ironic.drivers.modules import agent
from ironic.drivers.modules import fake
from ironic.drivers.modules import inspector
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import noop
from ironic.drivers.modules import pxe
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
class ManualManagementHardwareTestCase(db_base.DbTestCase):
def setUp(self):
super(ManualManagementHardwareTestCase, self).setUp()
self.config(enabled_hardware_types=['manual-management'],
enabled_power_interfaces=['fake'],
enabled_management_interfaces=['fake'],
enabled_inspect_interfaces=['no-inspect'])
self.config(enabled=True, group='inspector')
def test_default_interfaces(self):
node = obj_utils.create_test_node(self.context,
driver='manual-management')
with task_manager.acquire(self.context, node.id) as task:
self.assertIsInstance(task.driver.management, fake.FakeManagement)
self.assertIsInstance(task.driver.power, fake.FakePower)
self.assertIsInstance(task.driver.boot, pxe.PXEBoot)
self.assertIsInstance(task.driver.deploy, iscsi_deploy.ISCSIDeploy)
self.assertIsInstance(task.driver.inspect, noop.NoInspect)
self.assertIsInstance(task.driver.raid, noop.NoRAID)
def test_supported_interfaces(self):
self.config(enabled_inspect_interfaces=['inspector', 'no-inspect'])
node = obj_utils.create_test_node(self.context,
driver='manual-management',
deploy_interface='direct',
raid_interface='agent')
with task_manager.acquire(self.context, node.id) as task:
self.assertIsInstance(task.driver.management, fake.FakeManagement)
self.assertIsInstance(task.driver.power, fake.FakePower)
self.assertIsInstance(task.driver.boot, pxe.PXEBoot)
self.assertIsInstance(task.driver.deploy, agent.AgentDeploy)
self.assertIsInstance(task.driver.inspect, inspector.Inspector)
self.assertIsInstance(task.driver.raid, agent.AgentRAID)
def test_get_properties(self):
# These properties are from vendor (agent) and boot (pxe) interfaces
expected_prop_keys = [
'deploy_forces_oob_reboot', 'deploy_kernel', 'deploy_ramdisk']
hardware_type = driver_factory.get_hardware_type("manual-management")
properties = hardware_type.get_properties()
self.assertEqual(sorted(expected_prop_keys), sorted(properties.keys()))
@mock.patch.object(driver_factory, 'default_interface', autospec=True)
def test_get_properties_none(self, mock_def_iface):
hardware_type = driver_factory.get_hardware_type("manual-management")
mock_def_iface.side_effect = exception.NoValidDefaultForInterface("no")
properties = hardware_type.get_properties()
self.assertEqual({}, properties)
self.assertEqual(len(driver_base.ALL_INTERFACES),
mock_def_iface.call_count)
|
SauloAislan/ironic
|
ironic/tests/unit/drivers/test_generic.py
|
Python
|
apache-2.0
| 4,019
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jd): xmlrpclib is not shipped with Python 3
xmlrpclib = None
import six
from keystoneclient.openstack.common import gettextutils
from keystoneclient.openstack.common import importutils
from keystoneclient.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in value.iteritems())
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ that isn't callable as list().
return six.text_type(value)
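# Illustrative behaviour of to_primitive (comments only, values assumed):
# passing {'when': datetime.datetime(2013, 1, 1)} returns the dict with the
# datetime replaced by the string produced by timeutils.strtime(), and
# recursion deeper than max_depth is cut off with the placeholder '?' as
# described in the docstring above.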
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
|
raildo/python-keystoneclient
|
python-keystoneclient-0.4.1.7.gdca1d42/keystoneclient/openstack/common/jsonutils.py
|
Python
|
apache-2.0
| 6,464
|
import os
import sys
import json
from datetime import datetime
from django.db import connection, transaction
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from fly_project import constants
from api.models import QuizSubmission
from api.models import QuestionSubmission
class Command(BaseCommand):
"""
Run in your console:
$ python manage.py evaluate_quiz {{ quiz_submission_id }}
"""
help = _('Command will mark and score the User\'s submitted quiz.')
def add_arguments(self, parser):
parser.add_argument('id', nargs='+')
def handle(self, *args, **options):
# Process all the Quizzes that are inputted into this Command.
for id in options['id']:
try:
submission = QuizSubmission.objects.get(id=id)
self.begin_processing(submission)
except QuizSubmission.DoesNotExist:
pass
def begin_processing(self, submission):
"""
        Load the Quiz Submission, iterate through all of its Question
        Submissions, evaluate each one as either correct or wrong, and
        assign it a mark.
"""
# Step 1: Fetch all the submitted Questions for this Quiz.
try:
question_submissions = QuestionSubmission.objects.filter(
quiz=submission.quiz,
user=submission.user,
)
except QuestionSubmission.DoesNotExist:
question_submissions = None
# Step 2: Iterate through all the submitted Questions and mark them
# either right or wrong depending on the correct answer.
for question_submission in question_submissions.all():
is_right = True
question_answer = question_submission.question
            # Step 3: If the question is 'Open-ended' then simply give the
            #         student the mark; otherwise evaluate the submitted
            #         answers against the correct ones.
if question_submission.type == 1:
question_submission.mark = 1
else:
if question_submission.a is not question_answer.a_is_correct:
is_right = False
if question_submission.b is not question_answer.b_is_correct:
is_right = False
if question_submission.c is not question_answer.c_is_correct:
is_right = False
if question_submission.d is not question_answer.d_is_correct:
is_right = False
if question_submission.e is not question_answer.e_is_correct:
is_right = False
if question_submission.f is not question_answer.f_is_correct:
is_right = False
if is_right:
question_submission.mark = 1
else:
question_submission.mark = 0
question_submission.save()
# Step 4: Iterate through all the submitted Questions and perform a
# total mark tally of the Quiz and then mark the Quiz either a success
# or a failure.
total_mark = 0
actual_mark = 0
for question_submission in question_submissions.all():
total_mark += 1
actual_mark += question_submission.mark
final_mark = (actual_mark / total_mark) * 100
submission.final_mark = final_mark
submission.save()
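        # Worked example (assumed numbers, not from the original project):
        # 7 correct answers out of 10 submitted questions give
        # final_mark = (7 / 10) * 100, i.e. 70.0 under true division.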
|
Oinweb/py-fly
|
api/management/commands/evaluate_quiz.py
|
Python
|
bsd-2-clause
| 3,663
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Tracing controller class. This class manages
multiple tracing agents and collects data from all of them. It also
manages the clock sync process.
'''
from __future__ import print_function
import ast
import json
import sys
import tempfile
import uuid
import py_utils
from systrace import trace_result
from systrace import tracing_agents
from py_trace_event import trace_event
TRACE_DATA_CONTROLLER_NAME = 'systraceController'
def ControllerAgentClockSync(issue_ts, name):
"""Record the clock sync marker for controller tracing agent.
Unlike with the other tracing agents, the tracing controller should not
call this directly. Rather, it is called via callback from the other
tracing agents when they write a trace.
"""
trace_event.clock_sync(name, issue_ts=issue_ts)
class TracingControllerAgent(tracing_agents.TracingAgent):
def __init__(self):
# TODO(https://crbug.com/1262296): Update this after Python2 trybots retire.
# pylint: disable=super-with-arguments
super(TracingControllerAgent, self).__init__()
self._log_path = None
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, config, timeout=None):
"""Start tracing for the controller tracing agent.
Start tracing for the controller tracing agent. Note that
the tracing controller records the "controller side"
of the clock sync records, and nothing else.
"""
del config
if not trace_event.trace_can_enable():
raise RuntimeError('Cannot enable trace_event;'
' ensure py_utils is in PYTHONPATH')
controller_log_file = tempfile.NamedTemporaryFile(delete=False)
self._log_path = controller_log_file.name
controller_log_file.close()
trace_event.trace_enable(self._log_path)
return True
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StopAgentTracing(self, timeout=None):
"""Stops tracing for the controller tracing agent.
"""
# pylint: disable=no-self-use
# This function doesn't use self, but making it a member function
# for consistency with the other TracingAgents
trace_event.trace_disable()
return True
@py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
def GetResults(self, timeout=None):
"""Gets the log output from the controller tracing agent.
This output only contains the "controller side" of the clock sync records.
"""
with open(self._log_path, 'r') as outfile:
data = ast.literal_eval(outfile.read() + ']')
# Explicitly set its own clock domain. This will stop the Systrace clock
# domain from incorrectly being collapsed into the on device clock domain.
formatted_data = {
'traceEvents': data,
'metadata': {
'clock-domain': 'SYSTRACE',
}
}
return trace_result.TraceResult(TRACE_DATA_CONTROLLER_NAME,
json.dumps(formatted_data))
def SupportsExplicitClockSync(self):
"""Returns whether this supports explicit clock sync.
Although the tracing controller conceptually supports explicit clock
sync, it is not an agent controlled by other controllers so it does not
define RecordClockSyncMarker (rather, the recording of the "controller
side" of the clock sync marker is done in _IssueClockSyncMarker). Thus,
SupportsExplicitClockSync must return false.
"""
return False
# TODO(https://crbug.com/1262296): Update this after Python2 trybots retire.
# pylint: disable=arguments-differ
# pylint: disable=unused-argument
def RecordClockSyncMarker(self, sync_id, callback):
raise NotImplementedError
# TODO(https://crbug.com/1262296): Update this after Python2 trybots retire.
# pylint: disable=useless-object-inheritance
class TracingController(object):
def __init__(self, agents_with_config, controller_config):
"""Create tracing controller.
Create a tracing controller object. Note that the tracing
controller is also a tracing agent.
Args:
agents_with_config: List of tracing agents for this controller with the
corresponding tracing configuration objects.
controller_config: Configuration options for the tracing controller.
"""
self._child_agents = None
self._child_agents_with_config = agents_with_config
self._controller_agent = TracingControllerAgent()
self._controller_config = controller_config
self._trace_in_progress = False
self.all_results = None
@property
def get_child_agents(self):
return self._child_agents
def StartTracing(self):
"""Start tracing for all tracing agents.
This function starts tracing for both the controller tracing agent
and the child tracing agents.
Returns:
Boolean indicating whether or not the start tracing succeeded.
Start tracing is considered successful if at least the
controller tracing agent was started.
"""
assert not self._trace_in_progress, 'Trace already in progress.'
self._trace_in_progress = True
# Start the controller tracing agents. Controller tracing agent
# must be started successfully to proceed.
if not self._controller_agent.StartAgentTracing(
self._controller_config,
timeout=self._controller_config.timeout):
print('Unable to start controller tracing agent.')
return False
# Start the child tracing agents.
succ_agents = []
for agent_and_config in self._child_agents_with_config:
agent = agent_and_config.agent
config = agent_and_config.config
if agent.StartAgentTracing(config,
timeout=self._controller_config.timeout):
succ_agents.append(agent)
else:
print('Agent %s not started.' % str(agent))
    # Print a warning if not all agents were started.
na = len(self._child_agents_with_config)
ns = len(succ_agents)
if ns < na:
print('Warning: Only %d of %d tracing agents started.' % (ns, na))
self._child_agents = succ_agents
return True
def StopTracing(self):
"""Issue clock sync marker and stop tracing for all tracing agents.
This function stops both the controller tracing agent
and the child tracing agents. It issues a clock sync marker prior
to stopping tracing.
Returns:
Boolean indicating whether or not the stop tracing succeeded
for all agents.
"""
assert self._trace_in_progress, 'No trace in progress.'
self._trace_in_progress = False
# Issue the clock sync marker and stop the child tracing agents.
self._IssueClockSyncMarker()
succ_agents = []
for agent in self._child_agents:
if agent.StopAgentTracing(timeout=self._controller_config.timeout):
succ_agents.append(agent)
else:
print('Agent %s not stopped.' % str(agent))
# Stop the controller tracing agent. Controller tracing agent
# must be stopped successfully to proceed.
if not self._controller_agent.StopAgentTracing(
timeout=self._controller_config.timeout):
print('Unable to stop controller tracing agent.')
return False
    # Print a warning if not all agents were stopped.
na = len(self._child_agents)
ns = len(succ_agents)
if ns < na:
print('Warning: Only %d of %d tracing agents stopped.' % (ns, na))
self._child_agents = succ_agents
# Collect the results from all the stopped tracing agents.
all_results = []
for agent in self._child_agents + [self._controller_agent]:
try:
result = agent.GetResults(
timeout=self._controller_config.collection_timeout)
if not result:
print('Warning: Timeout when getting results from %s.' % str(agent))
continue
if result.source_name in [r.source_name for r in all_results]:
print ('Warning: Duplicate tracing agents named %s.' %
result.source_name)
all_results.append(result)
# Check for exceptions. If any exceptions are seen, reraise and abort.
      # Note that a timeout exception will be swallowed by the timeout
# mechanism and will not get to that point (it will return False instead
# of the trace result, which will be dealt with above)
except:
print('Warning: Exception getting results from %s:' % str(agent))
print('Try checking android device storage permissions for chrome')
print(sys.exc_info()[0])
raise
self.all_results = all_results
return all_results
def GetTraceType(self):
"""Return a string representing the child agents that are being traced."""
sorted_agents = sorted(map(str, self._child_agents))
return ' + '.join(sorted_agents)
def _IssueClockSyncMarker(self):
"""Issue clock sync markers to all the child tracing agents."""
for agent in self._child_agents:
if agent.SupportsExplicitClockSync():
sync_id = GetUniqueSyncID()
agent.RecordClockSyncMarker(sync_id, ControllerAgentClockSync)
def GetUniqueSyncID():
"""Get a unique sync ID.
Gets a unique sync ID by generating a UUID and converting it to a string
(since UUIDs are not JSON serializable)
"""
return str(uuid.uuid4())
# TODO(https://crbug.com/1262296): Update this after Python2 trybots retire.
# pylint: disable=useless-object-inheritance
class AgentWithConfig(object):
def __init__(self, agent, config):
self.agent = agent
self.config = config
def CreateAgentsWithConfig(options, modules):
"""Create tracing agents.
This function will determine which tracing agents are valid given the
options and create those agents along with their corresponding configuration
object.
Args:
options: The command-line options.
modules: The modules for either Systrace or profile_chrome.
TODO(washingtonp): After all profile_chrome agents are in
Systrace, this parameter will no longer be valid.
Returns:
A list of AgentWithConfig options containing agents and their corresponding
configuration object.
"""
result = []
for module in modules:
config = module.get_config(options)
agent = module.try_create_agent(config)
if agent and config:
result.append(AgentWithConfig(agent, config))
return [x for x in result if x and x.agent]
class TracingControllerConfig(tracing_agents.TracingConfig):
def __init__(self, output_file, trace_time, write_json,
link_assets, asset_dir, timeout, collection_timeout,
device_serial_number, target, trace_buf_size):
tracing_agents.TracingConfig.__init__(self)
self.output_file = output_file
self.trace_time = trace_time
self.write_json = write_json
self.link_assets = link_assets
self.asset_dir = asset_dir
self.timeout = timeout
self.collection_timeout = collection_timeout
self.device_serial_number = device_serial_number
self.target = target
self.trace_buf_size = trace_buf_size
def GetControllerConfig(options):
return TracingControllerConfig(options.output_file, options.trace_time,
options.write_json,
options.link_assets, options.asset_dir,
options.timeout, options.collection_timeout,
options.device_serial_number, options.target,
options.trace_buf_size)
def GetChromeStartupControllerConfig(options):
return TracingControllerConfig(None, options.trace_time,
options.write_json, None, None, None, None,
None, None, options.trace_buf_size)
|
catapult-project/catapult
|
systrace/systrace/tracing_controller.py
|
Python
|
bsd-3-clause
| 11,838
|
"""numerictypes: Define the numeric type objects
This module is designed so 'from numerictypes import *' is safe.
Exported symbols include:
Dictionary with all registered number types (including aliases):
typeDict
Type objects (not all will be available, depends on platform):
see variable sctypes for which ones you have
Bit-width names
int8 int16 int32 int64 int128
uint8 uint16 uint32 uint64 uint128
float16 float32 float64 float96 float128 float256
complex32 complex64 complex128 complex192 complex256 complex512
c-based names
bool_
object_
void, str_, unicode_
byte, ubyte,
short, ushort
intc, uintc,
intp, uintp,
int_, uint,
longlong, ulonglong,
single, csingle,
float_, complex_,
longfloat, clongfloat,
As part of the type-hierarchy: xx -- is bit-width
generic
+-> bool_ (kind=b)
+-> number (kind=i)
| integer
| signedinteger (intxx)
| byte
| short
| intc
| intp int0
| int_
| longlong
+-> unsignedinteger (uintxx) (kind=u)
| ubyte
| ushort
| uintc
| uintp uint0
| uint_
| ulonglong
+-> inexact
| +-> floating (floatxx) (kind=f)
| | single
| | float_ (double)
| | longfloat
| \-> complexfloating (complexxx) (kind=c)
| csingle (singlecomplex)
| complex_ (cfloat, cdouble)
| clongfloat (longcomplex)
+-> flexible
| character
| str_ (string_) (kind=S)
| unicode_ (kind=U)
| void (kind=V)
|
\-> object_ (not used much) (kind=O)
"""
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type']
from numpy.core.multiarray import typeinfo, ndarray, array, empty, dtype
import types as _types
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
from __builtin__ import bool, int, long, float, complex, object, unicode, str
# String-handling utilities to avoid locale-dependence.
# "import string" is costly to import!
# Construct the translation tables directly
# "A" = chr(65), "a" = chr(97)
_all_chars = map(chr, range(256))
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
LOWER_TABLE="".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
UPPER_TABLE="".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
#import string
# assert (string.maketrans(string.ascii_uppercase, string.ascii_lowercase) == \
# LOWER_TABLE)
# assert (string.maketrans(string.ascii_lowercase, string.ascii_uppercase) == \
# UPPER_TABLE)
#LOWER_TABLE = string.maketrans(string.ascii_uppercase, string.ascii_lowercase)
#UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase)
def english_lower(s):
""" Apply English case rules to convert ASCII strings to all lower case.
This is an internal utility function to replace calls to str.lower() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
lowered : str
Examples
--------
>>> from numpy.core.numerictypes import english_lower
>>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
>>> english_lower('')
''
"""
lowered = s.translate(LOWER_TABLE)
return lowered
def english_upper(s):
""" Apply English case rules to convert ASCII strings to all upper case.
This is an internal utility function to replace calls to str.upper() such
that we can avoid changing behavior with changing locales. In particular,
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
Parameters
----------
s : str
Returns
-------
uppered : str
Examples
--------
>>> from numpy.core.numerictypes import english_upper
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
>>> english_upper('')
''
"""
uppered = s.translate(UPPER_TABLE)
return uppered
def english_capitalize(s):
""" Apply English case rules to convert the first character of an ASCII
string to upper case.
This is an internal utility function to replace calls to str.capitalize()
such that we can avoid changing behavior with changing locales.
Parameters
----------
s : str
Returns
-------
capitalized : str
Examples
--------
>>> from numpy.core.numerictypes import english_capitalize
>>> english_capitalize('int8')
'Int8'
>>> english_capitalize('Int8')
'Int8'
>>> english_capitalize('')
''
"""
if s:
return english_upper(s[0]) + s[1:]
else:
return s
sctypeDict = {} # Contains all leaf-node scalar types with aliases
sctypeNA = {}      # Contains all leaf-node types -> numarray type equivalences
allTypes = {} # Collect the types we will add to the module here
def _evalname(name):
k = 0
for ch in name:
if ch in '0123456789':
break
k += 1
try:
bits = int(name[k:])
except ValueError:
bits = 0
base = name[:k]
return base, bits
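# Illustrative behaviour (comment added for clarity, not in the original
# source): _evalname('int32') returns ('int', 32), while _evalname('object_')
# returns ('object_', 0) because no digits follow the base name.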
def bitname(obj):
"""Return a bit-width name for a given type object"""
name = obj.__name__
base = ''
char = ''
try:
if name[-1] == '_':
newname = name[:-1]
else:
newname = name
info = typeinfo[english_upper(newname)]
assert(info[-1] == obj) # sanity check
bits = info[2]
except KeyError: # bit-width name
base, bits = _evalname(name)
char = base[0]
if name == 'bool_':
char = 'b'
base = 'bool'
elif name=='string_':
char = 'S'
base = 'string'
elif name=='unicode_':
char = 'U'
base = 'unicode'
elif name=='void':
char = 'V'
base = 'void'
elif name=='object_':
char = 'O'
base = 'object'
bits = 0
bytes = bits / 8
if char != '' and bytes != 0:
char = "%s%d" % (char, bytes)
return base, bits, char
def _add_types():
for a in typeinfo.keys():
name = english_lower(a)
if isinstance(typeinfo[a], tuple):
typeobj = typeinfo[a][-1]
# define C-name and insert typenum and typechar references also
allTypes[name] = typeobj
sctypeDict[name] = typeobj
sctypeDict[typeinfo[a][0]] = typeobj
sctypeDict[typeinfo[a][1]] = typeobj
else: # generic class
allTypes[name] = typeinfo[a]
_add_types()
def _add_aliases():
for a in typeinfo.keys():
name = english_lower(a)
if not isinstance(typeinfo[a], tuple):
continue
typeobj = typeinfo[a][-1]
# insert bit-width version for this class (if relevant)
base, bit, char = bitname(typeobj)
if base[-3:] == 'int' or char[0] in 'ui': continue
if base != '':
myname = "%s%d" % (base, bit)
if (name != 'longdouble' and name != 'clongdouble') or \
myname not in allTypes.keys():
allTypes[myname] = typeobj
sctypeDict[myname] = typeobj
if base == 'complex':
na_name = '%s%d' % (english_capitalize(base), bit/2)
elif base == 'bool':
na_name = english_capitalize(base)
sctypeDict[na_name] = typeobj
else:
na_name = "%s%d" % (english_capitalize(base), bit)
sctypeDict[na_name] = typeobj
sctypeNA[na_name] = typeobj
sctypeDict[na_name] = typeobj
sctypeNA[typeobj] = na_name
sctypeNA[typeinfo[a][0]] = na_name
if char != '':
sctypeDict[char] = typeobj
sctypeNA[char] = na_name
_add_aliases()
# Integers handled so that
# The int32, int64 types should agree exactly with
# PyArray_INT32, PyArray_INT64 in C
# We need to enforce the same checking as is done
# in arrayobject.h where the order of getting a
# bit-width match is:
# long, longlong, int, short, char
# for int8, int16, int32, int64, int128
def _add_integer_aliases():
_ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
for ctype in _ctypes:
val = typeinfo[ctype]
bits = val[2]
charname = 'i%d' % (bits/8,)
ucharname = 'u%d' % (bits/8,)
intname = 'int%d' % bits
UIntname = 'UInt%d' % bits
Intname = 'Int%d' % bits
uval = typeinfo['U'+ctype]
typeobj = val[-1]
utypeobj = uval[-1]
if intname not in allTypes.keys():
uintname = 'uint%d' % bits
allTypes[intname] = typeobj
allTypes[uintname] = utypeobj
sctypeDict[intname] = typeobj
sctypeDict[uintname] = utypeobj
sctypeDict[Intname] = typeobj
sctypeDict[UIntname] = utypeobj
sctypeDict[charname] = typeobj
sctypeDict[ucharname] = utypeobj
sctypeNA[Intname] = typeobj
sctypeNA[UIntname] = utypeobj
sctypeNA[charname] = typeobj
sctypeNA[ucharname] = utypeobj
sctypeNA[typeobj] = Intname
sctypeNA[utypeobj] = UIntname
sctypeNA[val[0]] = Intname
sctypeNA[uval[0]] = UIntname
_add_integer_aliases()
# We use these later
void = allTypes['void']
generic = allTypes['generic']
#
# Rework the Python names (so that float and complex and int are consistent
# with Python usage)
#
def _set_up_aliases():
type_pairs = [('complex_', 'cdouble'),
('int0', 'intp'),
('uint0', 'uintp'),
('single', 'float'),
('csingle', 'cfloat'),
('singlecomplex', 'cfloat'),
('float_', 'double'),
('intc', 'int'),
('uintc', 'uint'),
('int_', 'long'),
('uint', 'ulong'),
('cfloat', 'cdouble'),
('longfloat', 'longdouble'),
('clongfloat', 'clongdouble'),
('longcomplex', 'clongdouble'),
('bool_', 'bool'),
('unicode_', 'unicode'),
('str_', 'string'),
('string_', 'string'),
('object_', 'object')]
for alias, t in type_pairs:
allTypes[alias] = allTypes[t]
sctypeDict[alias] = sctypeDict[t]
# Remove aliases overriding python types and modules
for t in ['ulong', 'object', 'unicode', 'int', 'long', 'float',
'complex', 'bool', 'string']:
try:
del allTypes[t]
del sctypeDict[t]
except KeyError:
pass
_set_up_aliases()
# Now, construct dictionary to lookup character codes from types
_sctype2char_dict = {}
def _construct_char_code_lookup():
for name in typeinfo.keys():
tup = typeinfo[name]
if isinstance(tup, tuple):
if tup[0] not in ['p','P']:
_sctype2char_dict[tup[-1]] = tup[0]
_construct_char_code_lookup()
sctypes = {'int': [],
'uint':[],
'float':[],
'complex':[],
'others':[bool,object,str,unicode,void]}
def _add_array_type(typename, bits):
try:
t = allTypes['%s%d' % (typename, bits)]
except KeyError:
pass
else:
sctypes[typename].append(t)
def _set_array_types():
ibytes = [1, 2, 4, 8, 16, 32, 64]
fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
for bytes in ibytes:
bits = 8*bytes
_add_array_type('int', bits)
_add_array_type('uint', bits)
for bytes in fbytes:
bits = 8*bytes
_add_array_type('float', bits)
_add_array_type('complex', 2*bits)
_gi = dtype('p')
if _gi.type not in sctypes['int']:
indx = 0
sz = _gi.itemsize
_lst = sctypes['int']
while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
indx += 1
sctypes['int'].insert(indx, _gi.type)
sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'int128',
'uint128', 'float16',
'float32', 'float64', 'float80', 'float96', 'float128',
'float256',
'complex32', 'complex64', 'complex128', 'complex160',
'complex192', 'complex256', 'complex512', 'object']
def maximum_sctype(t):
"""returns the sctype of highest precision of the same general kind as 't'"""
g = obj2sctype(t)
if g is None:
return t
t = g
name = t.__name__
base, bits = _evalname(name)
if bits == 0:
return t
else:
return sctypes[base][-1]
_python_types = {int : 'int_',
float: 'float_',
complex: 'complex_',
bool: 'bool_',
str: 'string_',
unicode: 'unicode_',
_types.BufferType: 'void',
}
def _python_type(t):
"""returns the type corresponding to a certain Python type"""
if not isinstance(t, _types.TypeType):
t = type(t)
return allTypes[_python_types.get(t, 'object_')]
def issctype(rep):
"""Determines whether the given object represents
a numeric array type."""
if not isinstance(rep, (type, dtype)):
return False
try:
res = obj2sctype(rep)
if res and res != object_:
return True
return False
except:
return False
def obj2sctype(rep, default=None):
try:
if issubclass(rep, generic):
return rep
except TypeError:
pass
if isinstance(rep, dtype):
return rep.type
if isinstance(rep, type):
return _python_type(rep)
if isinstance(rep, ndarray):
return rep.dtype.type
try:
res = dtype(rep)
except:
return default
return res.type
def issubclass_(arg1, arg2):
try:
return issubclass(arg1, arg2)
except TypeError:
return False
def issubsctype(arg1, arg2):
return issubclass(obj2sctype(arg1), obj2sctype(arg2))
def issubdtype(arg1, arg2):
"""
    Returns True if the first argument is a typecode lower/equal in the type hierarchy.
Parameters
----------
arg1 : dtype_like
dtype or string representing a typecode.
arg2 : dtype_like
dtype or string representing a typecode.
See Also
--------
numpy.core.numerictypes : Overview of numpy type hierarchy.
Examples
--------
>>> np.issubdtype('S1', str)
True
>>> np.issubdtype(np.float64, np.float32)
False
"""
if issubclass_(arg2, generic):
return issubclass(dtype(arg1).type, arg2)
mro = dtype(arg2).type.mro()
if len(mro) > 1:
val = mro[1]
else:
val = mro[0]
return issubclass(dtype(arg1).type, val)
# This dictionary allows look up based on any alias for an array data-type
class _typedict(dict):
def __getitem__(self, obj):
return dict.__getitem__(self, obj2sctype(obj))
nbytes = _typedict()
_alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
for name, val in typeinfo.iteritems():
if not isinstance(val, tuple):
continue
obj = val[-1]
nbytes[obj] = val[2] / 8
_alignment[obj] = val[3]
if (len(val) > 5):
_maxvals[obj] = val[4]
_minvals[obj] = val[5]
else:
_maxvals[obj] = None
_minvals[obj] = None
_construct_lookups()
def sctype2char(sctype):
sctype = obj2sctype(sctype)
if sctype is None:
raise ValueError, "unrecognized type"
return _sctype2char_dict[sctype]
# Create dictionary of casting functions that wrap sequences
# indexed by type or type character
cast = _typedict()
ScalarType = [_types.IntType, _types.FloatType,
_types.ComplexType, _types.LongType, _types.BooleanType,
_types.StringType, _types.UnicodeType, _types.BufferType]
ScalarType.extend(_sctype2char_dict.keys())
ScalarType = tuple(ScalarType)
for key in _sctype2char_dict.keys():
cast[key] = lambda x, k=key : array(x, copy=False).astype(k)
# Create the typestring lookup dictionary
_typestr = _typedict()
for key in _sctype2char_dict.keys():
if issubclass(key, allTypes['flexible']):
_typestr[key] = _sctype2char_dict[key]
else:
_typestr[key] = empty((1,),key).dtype.str[1:]
# Make sure all typestrings are in sctypeDict
for key, val in _typestr.items():
if val not in sctypeDict:
sctypeDict[val] = key
# Add additional strings to the sctypeDict
_toadd = ['int', 'float', 'complex', 'bool', 'object', 'string', ('str', allTypes['string_']),
'unicode', 'object', ('a', allTypes['string_'])]
for name in _toadd:
if isinstance(name, tuple):
sctypeDict[name[0]] = name[1]
else:
sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
# Now add the types we've determined to this module
for key in allTypes:
globals()[key] = allTypes[key]
__all__.append(key)
del key
typecodes = {'Character':'c',
'Integer':'bhilqp',
'UnsignedInteger':'BHILQP',
'Float':'fdg',
'Complex':'FDG',
'AllInteger':'bBhHiIlLqQpP',
'AllFloat':'fdgFDG',
'All':'?bhilqpBHILQPfdgFDGSUVO'}
# backwards compatibility --- deprecated name
typeDict = sctypeDict
typeNA = sctypeNA
# b -> boolean
# u -> unsigned integer
# i -> signed integer
# f -> floating point
# c -> complex
# S -> string
# U -> Unicode string
# V -> record
# O -> Python object
_kind_list = ['b', 'u', 'i', 'f', 'c', 'S', 'U', 'V', 'O']
__test_types = typecodes['AllInteger'][:-2]+typecodes['AllFloat']+'O'
__len_test_types = len(__test_types)
# Keep incrementing until a common type both can be coerced to
# is found. Otherwise, return None
def _find_common_coerce(a, b):
if a > b:
return a
try:
thisind = __test_types.index(a.char)
except ValueError:
return None
while thisind < __len_test_types:
newdtype = dtype(__test_types[thisind])
if newdtype >= b and newdtype >= a:
return newdtype
thisind += 1
return None
def find_common_type(array_types, scalar_types):
"""
Determine common type following standard coercion rules
Parameters
----------
array_types : sequence
A list of dtype convertible objects representing arrays
scalar_types : sequence
A list of dtype convertible objects representing scalars
Returns
-------
datatype : dtype
The common data-type which is the maximum of the array_types
ignoring the scalar_types unless the maximum of the scalar_types
is of a different kind.
        If the kind is not understood, then None is returned.
See Also
--------
dtype
"""
array_types = [dtype(x) for x in array_types]
scalar_types = [dtype(x) for x in scalar_types]
if len(scalar_types) == 0:
if len(array_types) == 0:
return None
else:
return max(array_types)
if len(array_types) == 0:
return max(scalar_types)
maxa = max(array_types)
maxsc = max(scalar_types)
try:
index_a = _kind_list.index(maxa.kind)
index_sc = _kind_list.index(maxsc.kind)
except ValueError:
return None
if index_sc > index_a:
return _find_common_coerce(maxsc,maxa)
else:
return maxa
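# Illustrative coercion behaviour (comments only, derived from the code above):
# find_common_type([float32], [int64]) keeps float32, because the integer kind
# of the scalars does not outrank the float kind of the arrays; whereas
# find_common_type([float64], [complex64]) must coerce and, via
# _find_common_coerce, ends up at complex128, the smallest type that can hold
# both a float64 and a complex64.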
|
efiring/numpy-work
|
numpy/core/numerictypes.py
|
Python
|
bsd-3-clause
| 20,785
|
from gpiozero import Button
import pygame.mixer
from pygame.mixer import Sound
from signal import pause
pygame.mixer.init()
button_sounds = {
Button(2): Sound("samples/drum_tom_mid_hard.wav"),
Button(3): Sound("samples/drum_cymbal_open.wav"),
}
for button, sound in button_sounds.items():
button.when_pressed = sound.play
pause()
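# Additional pads could be wired up the same way; the pin number and sample
# path below are illustrative assumptions, not part of the original example:
# button_sounds[Button(4)] = Sound("samples/drum_snare_hard.wav")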
|
waveform80/gpio-zero
|
docs/examples/music_box.py
|
Python
|
bsd-3-clause
| 346
|
# coding=utf-8
from django.db import models
from main.models import BaseModel
STAT_TYPES = (
('user_count', 'User count'),
('host_count', 'Host count'),
('ip_update_count', 'IP update count'),
)
class StatisticsEntry(BaseModel):
class Meta:
verbose_name_plural = 'Statistics Entries'
stat_type = models.CharField(max_length=32, choices=STAT_TYPES)
value = models.IntegerField()
def __unicode__(self):
return "%s: %d (%s)" % (self.stat_type, self.value, self.created)
|
fadenb/hopper.pw
|
hopperpw/stats/models.py
|
Python
|
bsd-3-clause
| 517
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import emission.core.wrapper.wrapperbase as ecwb
class Incident(ecwb.WrapperBase):
props = {"loc": ecwb.WrapperBase.Access.RO, # geojson representation of the point
"ts": ecwb.WrapperBase.Access.RO, # timestamp representation of the point
"stress": ecwb.WrapperBase.Access.RO, # stress level (0 = no stress, 100 = max stress)
"local_dt": ecwb.WrapperBase.Access.RO, # searchable datetime in local time
"fmt_time": ecwb.WrapperBase.Access.RO # formatted time
}
enums = {}
geojson = ["loc"]
nullable = []
local_dates = ["local_dt"]
def _populateDependencies(self):
pass
|
e-mission/e-mission-server
|
emission/core/wrapper/incident.py
|
Python
|
bsd-3-clause
| 925
|
#!/usr/bin/env python
# FIXME -- eventually, we want more (all?) of these to work
imagedir = parent + "/libtiffpic/depth"
files = [
"flower-minisblack-02.tif", # 73x43 2-bit minisblack gray image
"flower-minisblack-04.tif", # 73x43 4-bit minisblack gray image
"flower-minisblack-06.tif", # 73x43 6-bit minisblack gray image
"flower-minisblack-08.tif", # 73x43 8-bit minisblack gray image
"flower-minisblack-10.tif", # 73x43 10-bit minisblack gray image
"flower-minisblack-12.tif", # 73x43 12-bit minisblack gray image
"flower-minisblack-14.tif", # 73x43 14-bit minisblack gray image
"flower-minisblack-16.tif", # 73x43 16-bit minisblack gray image
#FIXME "flower-minisblack-24.tif", # 73x43 24-bit minisblack gray image
"flower-minisblack-32.tif", # 73x43 32-bit minisblack gray image
"flower-palette-02.tif", #73x43 4-entry colormapped image
"flower-palette-04.tif", #73x43 16-entry colormapped image
"flower-palette-08.tif", #73x43 256-entry colormapped image
#FIXME "flower-palette-16.tif", # 73x43 65536-entry colormapped image
"flower-rgb-contig-02.tif", # 73x43 2-bit contiguous RGB image
"flower-rgb-contig-04.tif", # 73x43 4-bit contiguous RGB image
"flower-rgb-contig-08.tif", # 73x43 8-bit contiguous RGB image
"flower-rgb-contig-10.tif", # 73x43 10-bit contiguous RGB image
"flower-rgb-contig-12.tif", # 73x43 12-bit contiguous RGB image
"flower-rgb-contig-14.tif", # 73x43 14-bit contiguous RGB image
"flower-rgb-contig-16.tif", # 73x43 16-bit contiguous RGB image
#FIXME "flower-rgb-contig-24.tif", # 73x43 24-bit contiguous RGB image
"flower-rgb-contig-32.tif", # 73x43 32-bit contiguous RGB image
"flower-rgb-planar-02.tif", # 73x43 2-bit seperated RGB image
"flower-rgb-planar-04.tif", # 73x43 4-bit seperated RGB image
"flower-rgb-planar-08.tif", # 73x43 8-bit seperated RGB image
"flower-rgb-planar-10.tif", # 73x43 10-bit seperated RGB image
"flower-rgb-planar-12.tif", # 73x43 12-bit seperated RGB image
"flower-rgb-planar-14.tif", # 73x43 14-bit seperated RGB image
"flower-rgb-planar-16.tif", # 73x43 16-bit seperated RGB image
#FIXME "flower-rgb-planar-24.tif", # 73x43 24-bit seperated RGB image
#FIXME "flower-rgb-planar-32.tif", # 73x43 32-bit seperated RGB image
"flower-separated-contig-08.tif", # 73x43 8-bit contiguous CMYK image
"flower-separated-contig-16.tif", # 73x43 16-bit contiguous CMYK image
"flower-separated-planar-08.tif", # 73x43 8-bit separated CMYK image
"flower-separated-planar-16.tif", # 73x43 16-bit separated CMYK image
]
for f in files:
command += rw_command (imagedir, f)
print ("COMMAND= " + command)
|
YangYangTL/oiio
|
testsuite/tiff-depths/run.py
|
Python
|
bsd-3-clause
| 2,805
|
from __future__ import unicode_literals
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
import warnings
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
from django.utils import six
# For backwards compatibility -- lots of code uses this in the wild!
from django.http.response import REASON_PHRASES as STATUS_CODE_TEXT # NOQA
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
'''
LimitedStream wraps another stream in order to not allow reading from it
past specified amount of bytes.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
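# Illustrative usage of LimitedStream (comments only, not part of the original
# module; BytesIO is already imported at the top of this file):
#   stream = LimitedStream(BytesIO(b"abcdef"), limit=4)
#   stream.read()   # -> b'abcd'  (reading stops at the limit)
#   stream.read()   # -> b''      (nothing remains past the limit)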
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ)
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = '/'
self.environ = environ
self.path_info = path_info
self.path = '%s/%s' % (script_name.rstrip('/'), path_info.lstrip('/'))
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
_, content_params = self._parse_content_type(environ.get('CONTENT_TYPE', ''))
if 'charset' in content_params:
try:
codecs.lookup(content_params['charset'])
except LookupError:
pass
else:
self.encoding = content_params['charset']
self._post_parse_error = False
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get('wsgi.url_scheme')
def _parse_content_type(self, ctype):
"""
Media Types parsing according to RFC 2616, section 3.7.
Returns the data type and parameters. For example:
Input: "text/plain; charset=iso-8859-1"
Output: ('text/plain', {'charset': 'iso-8859-1'})
"""
content_type, _, params = ctype.partition(';')
content_params = {}
for parameter in params.split(';'):
k, _, v = parameter.strip().partition('=')
content_params[k] = v
return content_type, content_params
def _get_request(self):
warnings.warn('`request.REQUEST` is deprecated, use `request.GET` or '
'`request.POST` instead.', PendingDeprecationWarning, 2)
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
return http.QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
return http.parse_cookie(raw_cookie)
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialised.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
response._handler_class = self.__class__
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
return response
def get_path_info(environ):
"""
Returns the HTTP request's PATH_INFO as a unicode string.
"""
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
# It'd be better to implement URI-to-IRI decoding, see #19508.
return path_info.decode(UTF_8)
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
if not script_url:
script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
if script_url:
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
script_name = script_url[:-len(path_info)]
else:
script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
# It'd be better to implement URI-to-IRI decoding, see #19508.
return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
# is the default. Re-encode to recover the original bytestring.
return value if six.PY2 else value.encode(ISO_8859_1)
def get_str_from_wsgi(environ, key, default):
"""
    Get a value from the WSGI environ dictionary as str.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Same comment as above
return value if six.PY2 else value.encode(ISO_8859_1).decode(UTF_8)
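# --- Illustrative sketch, not part of the wsgi.py module above ---
# Why the ISO-8859-1 round-trip in get_bytes_from_wsgi() is lossless: PEP 3333
# has Python 3 WSGI servers expose environ values as latin-1-decoded text, so
# re-encoding with the same codec recovers the exact bytes the client sent,
# which can then be decoded as UTF-8.
raw = u"caf\u00e9".encode("utf-8")              # bytes on the wire: b'caf\xc3\xa9'
environ_value = raw.decode("iso-8859-1")        # what a Python 3 WSGI server stores
recovered = environ_value.encode("iso-8859-1")  # what get_bytes_from_wsgi() returns
assert recovered == raw
assert recovered.decode("utf-8") == u"caf\u00e9"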
| Beeblio/django | django/core/handlers/wsgi.py | Python | bsd-3-clause | 9,989 |
from sympy import (Symbol, symbols, factorial, factorial2, binomial,
rf, ff, gamma, polygamma, EulerGamma, O, pi, nan,
oo, simplify, expand_func)
from sympy.functions.combinatorial.factorials import subfactorial
from sympy.utilities.pytest import XFAIL, raises
def test_rf_eval_apply():
x, y = symbols('x,y')
assert rf(nan, y) == nan
assert rf(x, y) == rf(x, y)
assert rf(oo, 0) == 1
assert rf(-oo, 0) == 1
assert rf(oo, 6) == oo
assert rf(-oo, 7) == -oo
assert rf(oo, -6) == oo
assert rf(-oo, -7) == oo
assert rf(x, 0) == 1
assert rf(x, 1) == x
assert rf(x, 2) == x*(x + 1)
assert rf(x, 3) == x*(x + 1)*(x + 2)
assert rf(x, 5) == x*(x + 1)*(x + 2)*(x + 3)*(x + 4)
assert rf(x, -1) == 1/(x - 1)
assert rf(x, -2) == 1/((x - 1)*(x - 2))
assert rf(x, -3) == 1/((x - 1)*(x - 2)*(x - 3))
assert rf(1, 100) == factorial(100)
def test_ff_eval_apply():
x, y = symbols('x,y')
assert ff(nan, y) == nan
assert ff(x, y) == ff(x, y)
assert ff(oo, 0) == 1
assert ff(-oo, 0) == 1
assert ff(oo, 6) == oo
assert ff(-oo, 7) == -oo
assert ff(oo, -6) == oo
assert ff(-oo, -7) == oo
assert ff(x, 0) == 1
assert ff(x, 1) == x
assert ff(x, 2) == x*(x - 1)
assert ff(x, 3) == x*(x - 1)*(x - 2)
assert ff(x, 5) == x*(x - 1)*(x - 2)*(x - 3)*(x - 4)
assert ff(x, -1) == 1/(x + 1)
assert ff(x, -2) == 1/((x + 1)*(x + 2))
assert ff(x, -3) == 1/((x + 1)*(x + 2)*(x + 3))
assert ff(100, 100) == factorial(100)
def test_factorial():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True, positive=True)
assert factorial(-2) == 0
assert factorial(0) == 1
assert factorial(7) == 5040
assert factorial(n).func == factorial
assert factorial(2*n).func == factorial
assert factorial(n).is_integer
assert factorial(n).is_positive is None
assert factorial(k).is_positive
def test_factorial_diff():
n = Symbol('n', integer=True)
assert factorial(n).diff(n) == \
gamma(1 + n)*polygamma(0, 1 + n)
assert factorial(n**2).diff(n) == \
2*n*gamma(1 + n**2)*polygamma(0, 1 + n**2)
def test_factorial_series():
n = Symbol('n', integer=True)
assert factorial(n).series(n, 0, 3) == \
1 - n*EulerGamma + n**2*(EulerGamma**2/2 + pi**2/12) + O(n**3)
def test_factorial_rewrite():
n = Symbol('n', integer=True)
assert factorial(n).rewrite(gamma) == gamma(n + 1)
def test_factorial2():
n = Symbol('n', integer=True)
assert factorial2(-1) == 1
assert factorial2(0) == 1
assert factorial2(7) == 105
assert factorial2(8) == 384
assert factorial2(n).func == factorial2
def test_binomial():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
u = Symbol('v', negative=True)
v = Symbol('m', positive=True)
assert binomial(0, 0) == 1
assert binomial(1, 1) == 1
assert binomial(10, 10) == 1
assert binomial(1, 2) == 0
assert binomial(1, -1) == 0
assert binomial(-1, 1) == -1
assert binomial(-10, 1) == -10
assert binomial(-10, 7) == -11440
assert binomial(n, -1) == 0
assert binomial(n, 0) == 1
assert expand_func(binomial(n, 1)) == n
assert expand_func(binomial(n, 2)) == n*(n - 1)/2
assert expand_func(binomial(n, n - 2)) == n*(n - 1)/2
assert expand_func(binomial(n, n - 1)) == n
assert binomial(n, 3).func == binomial
assert binomial(n, 3).expand(func=True) == n**3/6 - n**2/2 + n/3
assert expand_func(binomial(n, 3)) == n*(n - 2)*(n - 1)/6
assert binomial(n, n) == 1
assert binomial(n, n + 1) == 0
assert binomial(n, u) == 0
assert binomial(n, v).func == binomial
assert binomial(n, k).func == binomial
assert binomial(n, n + v) == 0
assert expand_func(binomial(n, n-3)) == n*(n - 2)*(n - 1)/6
def test_binomial_diff():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
assert binomial(n, k).diff(n) == \
(-polygamma(0, 1 + n - k) + polygamma(0, 1 + n))*binomial(n, k)
assert binomial(n**2, k**3).diff(n) == \
2*n*(-polygamma(
0, 1 + n**2 - k**3) + polygamma(0, 1 + n**2))*binomial(n**2, k**3)
assert binomial(n, k).diff(k) == \
(-polygamma(0, 1 + k) + polygamma(0, 1 + n - k))*binomial(n, k)
assert binomial(n**2, k**3).diff(k) == \
3*k**2*(-polygamma(
0, 1 + k**3) + polygamma(0, 1 + n**2 - k**3))*binomial(n**2, k**3)
def test_binomial_rewrite():
n = Symbol('n', integer=True)
k = Symbol('k', integer=True)
assert binomial(n, k).rewrite(
factorial) == factorial(n)/(factorial(k)*factorial(n - k))
assert binomial(
n, k).rewrite(gamma) == gamma(n + 1)/(gamma(k + 1)*gamma(n - k + 1))
@XFAIL
def test_factorial_simplify_fail():
# simplify(factorial(x + 1).diff(x) - ((x + 1)*factorial(x)).diff(x))) == 0
from sympy.abc import x
assert simplify(x*polygamma(0, x + 1) - x*polygamma(0, x + 2) +
polygamma(0, x + 1) - polygamma(0, x + 2) + 1) == 0
def test_subfactorial():
assert all(subfactorial(i) == ans for i, ans in enumerate(
[1, 0, 1, 2, 9, 44, 265, 1854, 14833, 133496]))
raises(ValueError, lambda: subfactorial(0.1))
raises(ValueError, lambda: subfactorial(-2))
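# --- Illustrative sketch, not part of the test module above ---
# The values asserted in test_subfactorial() follow the derangement recurrence
# D(n) = (n - 1) * (D(n - 1) + D(n - 2)). `derangements` is a hypothetical
# plain-Python helper included only to make that recurrence explicit.
def derangements(n):
    a, b = 1, 0                      # D(0), D(1)
    for k in range(2, n + 1):
        a, b = b, (k - 1) * (a + b)  # advance to D(k - 1), D(k)
    return a if n == 0 else b
assert [derangements(i) for i in range(10)] == [
    1, 0, 1, 2, 9, 44, 265, 1854, 14833, 133496]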
| kmacinnis/sympy | sympy/functions/combinatorial/tests/test_comb_factorials.py | Python | bsd-3-clause | 5,343 |
import re
from django import forms
from django.conf import settings
from django.template.defaultfilters import slugify
from tower import ugettext_lazy as _lazy
from kitsune.products.models import Product, Topic
from kitsune.sumo.form_fields import MultiUsernameField, StrippedCharField
from kitsune.wiki.config import SIGNIFICANCES, CATEGORIES
from kitsune.wiki.models import (
Document, Revision, MAX_REVISION_COMMENT_LENGTH)
from kitsune.wiki.tasks import add_short_links
from kitsune.wiki.widgets import (
RadioFieldRendererWithHelpText, ProductTopicsAndSubtopicsWidget)
TITLE_REQUIRED = _lazy(u'Please provide a title.')
TITLE_SHORT = _lazy(u'The title is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
TITLE_LONG = _lazy(u'Please keep the length of the title to %(limit_value)s '
u'characters or less. It is currently %(show_value)s '
u'characters.')
SLUG_REQUIRED = _lazy(u'Please provide a slug.')
SLUG_INVALID = _lazy(u'The slug provided is not valid.')
SLUG_SHORT = _lazy(u'The slug is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
SLUG_LONG = _lazy(u'Please keep the length of the slug to %(limit_value)s '
u'characters or less. It is currently %(show_value)s '
u'characters.')
SUMMARY_REQUIRED = _lazy(u'Please provide a summary.')
SUMMARY_SHORT = _lazy(u'The summary is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
SUMMARY_LONG = _lazy(u'Please keep the length of the summary to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
CONTENT_REQUIRED = _lazy(u'Please provide content.')
CONTENT_SHORT = _lazy(u'The content is too short (%(show_value)s characters). '
u'It must be at least %(limit_value)s characters.')
CONTENT_LONG = _lazy(u'Please keep the length of the content to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
COMMENT_LONG = _lazy(u'Please keep the length of the comment to '
u'%(limit_value)s characters or less. It is currently '
u'%(show_value)s characters.')
PRODUCT_REQUIRED = _lazy(u'Please select at least one product.')
TOPIC_REQUIRED = _lazy(u'Please select at least one topic.')
class DocumentForm(forms.ModelForm):
"""Form to create/edit a document."""
def __init__(self, *args, **kwargs):
# Quasi-kwargs:
can_archive = kwargs.pop('can_archive', False)
can_edit_needs_change = kwargs.pop('can_edit_needs_change', False)
initial_title = kwargs.pop('initial_title', '')
super(DocumentForm, self).__init__(*args, **kwargs)
title_field = self.fields['title']
title_field.initial = initial_title
slug_field = self.fields['slug']
slug_field.initial = slugify(initial_title)
topics_field = self.fields['topics']
topics_field.choices = Topic.objects.values_list('id', 'title')
products_field = self.fields['products']
products_field.choices = Product.objects.values_list('id', 'title')
# If user hasn't permission to frob is_archived, remove the field. This
# causes save() to skip it as well.
if not can_archive:
del self.fields['is_archived']
# If user hasn't permission to mess with needs_change*, remove the
# fields. This causes save() to skip it as well.
if not can_edit_needs_change:
del self.fields['needs_change']
del self.fields['needs_change_comment']
title = StrippedCharField(
min_length=5, max_length=255,
widget=forms.TextInput(),
label=_lazy(u'Title:'),
help_text=_lazy(u'Title of article'),
error_messages={'required': TITLE_REQUIRED,
'min_length': TITLE_SHORT,
'max_length': TITLE_LONG})
# We don't use forms.SlugField because it is too strict in
# what it allows (English/Roman alpha-numeric characters and dashes).
# Instead, we do custom validation in `clean_slug` below.
slug = StrippedCharField(
min_length=3, max_length=255,
widget=forms.TextInput(),
label=_lazy(u'Slug:'),
help_text=_lazy(u'Article URL'),
error_messages={'required': SLUG_REQUIRED,
'min_length': SLUG_SHORT,
'max_length': SLUG_LONG})
products = forms.MultipleChoiceField(
label=_lazy(u'Relevant to:'),
required=False,
widget=forms.CheckboxSelectMultiple())
is_localizable = forms.BooleanField(
initial=True,
label=_lazy(u'Allow translations:'),
required=False)
is_archived = forms.BooleanField(
label=_lazy(u'Obsolete:'),
required=False)
allow_discussion = forms.BooleanField(
label=_lazy(u'Allow discussion on this article?'),
initial=True,
required=False)
category = forms.ChoiceField(
choices=CATEGORIES,
# Required for non-translations, which is
# enforced in Document.clean().
required=False,
label=_lazy(u'Category:'),
help_text=_lazy(u'Type of article'))
topics = forms.MultipleChoiceField(
label=_lazy(u'Topics:'),
required=False,
widget=ProductTopicsAndSubtopicsWidget())
locale = forms.CharField(widget=forms.HiddenInput())
needs_change = forms.BooleanField(
label=_lazy(u'Needs change:'),
initial=False,
required=False)
needs_change_comment = forms.CharField(
label=_lazy(u'Comment:'),
widget=forms.Textarea(),
required=False)
def clean_slug(self):
slug = self.cleaned_data['slug']
        # Blacklist /, ?, % and +.
if not re.compile(r'^[^/^\+^\?%]+$').match(slug):
raise forms.ValidationError(SLUG_INVALID)
return slug
def clean(self):
c = super(DocumentForm, self).clean()
locale = c.get('locale')
# Products are required for en-US
products = c.get('products')
if (locale == settings.WIKI_DEFAULT_LANGUAGE and
(not products or len(products) < 1)):
raise forms.ValidationError(PRODUCT_REQUIRED)
# Topics are required for en-US
topics = c.get('topics')
if (locale == settings.WIKI_DEFAULT_LANGUAGE and
(not topics or len(topics) < 1)):
raise forms.ValidationError(TOPIC_REQUIRED)
return c
class Meta:
model = Document
fields = ('title', 'slug', 'category', 'is_localizable', 'products',
'topics', 'locale', 'is_archived', 'allow_discussion',
'needs_change', 'needs_change_comment')
def save(self, parent_doc, **kwargs):
"""Persist the Document form, and return the saved Document."""
doc = super(DocumentForm, self).save(commit=False, **kwargs)
doc.parent = parent_doc
# If document doesn't need change, clear out the comment.
if not doc.needs_change:
doc.needs_change_comment = ''
# Create the share link if it doesn't exist and is in
# a category it should show for.
doc.save()
if (doc.category in settings.IA_DEFAULT_CATEGORIES
and not doc.share_link):
# This operates under the constraints of passing in a list.
add_short_links.delay([doc.pk])
self.save_m2m()
if parent_doc:
# Products are not set on translations.
doc.products.remove(*[p for p in doc.products.all()])
return doc
class RevisionForm(forms.ModelForm):
"""Form to create new revisions."""
keywords = StrippedCharField(required=False,
label=_lazy(u'Keywords:'),
help_text=_lazy(u'Affects search results'))
summary = StrippedCharField(
min_length=5, max_length=1000, widget=forms.Textarea(),
label=_lazy(u'Search result summary:'),
help_text=_lazy(u'Only displayed on search results page'),
error_messages={'required': SUMMARY_REQUIRED,
'min_length': SUMMARY_SHORT,
'max_length': SUMMARY_LONG})
content = StrippedCharField(
min_length=5, max_length=100000,
label=_lazy(u'Content:'),
widget=forms.Textarea(),
error_messages={'required': CONTENT_REQUIRED,
'min_length': CONTENT_SHORT,
'max_length': CONTENT_LONG})
expires = forms.DateField(
label=_lazy(u'Expiry date:'),
required=False)
comment = StrippedCharField(required=False, label=_lazy(u'Comment:'))
class Meta(object):
model = Revision
fields = ('keywords', 'summary', 'content', 'comment', 'based_on',
'expires')
def __init__(self, *args, **kwargs):
super(RevisionForm, self).__init__(*args, **kwargs)
self.fields['based_on'].widget = forms.HiddenInput()
self.fields['comment'].widget = forms.TextInput(
attrs={'maxlength': MAX_REVISION_COMMENT_LENGTH})
def save(self, creator, document, based_on_id=None, base_rev=None,
**kwargs):
"""Persist me, and return the saved Revision.
Take several other necessary pieces of data that aren't from the
form.
"""
# Throws a TypeError if somebody passes in a commit kwarg:
new_rev = super(RevisionForm, self).save(commit=False, **kwargs)
new_rev.document = document
new_rev.creator = creator
if based_on_id:
new_rev.based_on_id = based_on_id
# If the document doesn't allow the revision creator to edit the
# keywords, keep the old value.
if base_rev and not document.allows(creator, 'edit_keywords'):
new_rev.keywords = base_rev.keywords
new_rev.save()
return new_rev
class ReviewForm(forms.Form):
comment = StrippedCharField(max_length=2000, widget=forms.Textarea(),
required=False, label=_lazy(u'Comment:'),
error_messages={'max_length': COMMENT_LONG})
_widget = forms.RadioSelect(renderer=RadioFieldRendererWithHelpText)
significance = forms.TypedChoiceField(
label=_lazy(u'Significance:'),
choices=SIGNIFICANCES,
initial=SIGNIFICANCES[1][0],
required=False, widget=_widget,
coerce=int, empty_value=SIGNIFICANCES[1][0])
is_ready_for_localization = forms.BooleanField(
initial=False,
label=_lazy(u'Ready for localization'),
required=False)
needs_change = forms.BooleanField(
label=_lazy(u'Needs change'),
initial=False,
required=False)
needs_change_comment = forms.CharField(
label=_lazy(u'Comment:'),
widget=forms.Textarea(),
required=False)
class AddContributorForm(forms.Form):
"""Form to add contributors to a document."""
users = MultiUsernameField(
widget=forms.TextInput(attrs={'placeholder': _lazy(u'username'),
'class': 'user-autocomplete'}))
languages = [('', 'Any')] + [(l[0], u'{1} ({0})'.format(*l))
for l in settings.LANGUAGE_CHOICES]
class RevisionFilterForm(forms.Form):
"""Form to filter a list of revisions."""
locale = forms.ChoiceField(label=_lazy(u'Locale:'), choices=languages,
required=False)
users = MultiUsernameField(label=_lazy(u'Users:'), required=False)
start = forms.DateField(label=_lazy(u'Start:'), required=False)
end = forms.DateField(label=_lazy(u'End:'), required=False)
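# --- Illustrative sketch, not part of kitsune/wiki/forms.py above ---
# Standalone check of the slug rule enforced by DocumentForm.clean_slug(): the
# pattern rejects slugs containing '/', '+', '?' or '%' (and, as written, '^')
# while allowing non-ASCII slugs. The sample slugs below are made up.
import re
SLUG_RE = re.compile(r'^[^/^\+^\?%]+$')
assert SLUG_RE.match(u'how-to-set-the-home-page')
assert SLUG_RE.match(u'p\u00e1gina-de-inicio')    # non-ASCII characters pass
assert not SLUG_RE.match(u'bad/slug')
assert not SLUG_RE.match(u'50%-off')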
| philipp-sumo/kitsune | kitsune/wiki/forms.py | Python | bsd-3-clause | 12,054 |
#!/usr/bin/env python
"""FEM library
Demonstrates some simple finite element definitions, and computes a mass
matrix
$ python fem.py
[ 1/60, 0, -1/360, 0, -1/90, -1/360]
[ 0, 4/45, 0, 2/45, 2/45, -1/90]
[-1/360, 0, 1/60, -1/90, 0, -1/360]
[ 0, 2/45, -1/90, 4/45, 2/45, 0]
[ -1/90, 2/45, 0, 2/45, 4/45, 0]
[-1/360, -1/90, -1/360, 0, 0, 1/60]
"""
from sympy import symbols, Symbol, factorial, Rational, zeros, div, eye, \
    integrate, diff, pprint, reduced, binomial
x, y, z = symbols('x,y,z')
class ReferenceSimplex:
def __init__(self, nsd):
self.nsd = nsd
coords = []
if nsd <= 3:
coords = symbols('x,y,z')[:nsd]
else:
coords = []
for d in range(0,nsd):
coords.append(Symbol("x_%d" % d))
self.coords = coords
def integrate(self,f):
coords = self.coords
nsd = self.nsd
limit = 1
for p in coords:
limit -= p
intf = f
for d in range(0,nsd):
p = coords[d]
limit += p
intf = integrate(intf, (p, 0, limit))
return intf
def bernstein_space(order, nsd):
if nsd > 3:
raise RuntimeError("Bernstein only implemented in 1D, 2D, and 3D")
sum = 0
basis = []
coeff = []
if nsd == 1:
b1, b2 = x, 1-x
for o1 in range(0,order+1):
for o2 in range(0,order+1):
if o1 + o2 == order:
aij = Symbol("a_%d_%d" % (o1,o2))
sum += aij*binomial(order,o1)*pow(b1, o1)*pow(b2, o2)
basis.append(binomial(order,o1)*pow(b1, o1)*pow(b2, o2))
coeff.append(aij)
if nsd == 2:
b1, b2, b3 = x, y, 1-x-y
for o1 in range(0,order+1):
for o2 in range(0,order+1):
for o3 in range(0,order+1):
if o1 + o2 + o3 == order:
aij = Symbol("a_%d_%d_%d" % (o1,o2,o3))
fac = factorial(order) / (factorial(o1)*factorial(o2)*factorial(o3))
sum += aij*fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)
basis.append(fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3))
coeff.append(aij)
if nsd == 3:
b1, b2, b3, b4 = x, y, z, 1-x-y-z
for o1 in range(0,order+1):
for o2 in range(0,order+1):
for o3 in range(0,order+1):
for o4 in range(0,order+1):
if o1 + o2 + o3 + o4 == order:
aij = Symbol("a_%d_%d_%d_%d" % (o1,o2,o3,o4))
fac = factorial(order)/ (factorial(o1)*factorial(o2)*factorial(o3)*factorial(o4))
sum += aij*fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)*pow(b4, o4)
basis.append(fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)*pow(b4, o4))
coeff.append(aij)
return sum, coeff, basis
def create_point_set(order, nsd):
h = Rational(1,order)
set = []
if nsd == 1:
for i in range(0, order+1):
x = i*h
if x <= 1:
set.append((x,y))
if nsd == 2:
for i in range(0, order+1):
x = i*h
for j in range(0, order+1):
y = j*h
if x + y <= 1:
set.append((x,y))
if nsd == 3:
for i in range(0, order+1):
x = i*h
for j in range(0, order+1):
y = j*h
for k in range(0, order+1):
                    z = k*h
if x + y + z <= 1:
set.append((x,y,z))
return set
def create_matrix(equations, coeffs):
A = zeros(len(equations))
i = 0; j = 0
for j in range(0, len(coeffs)):
c = coeffs[j]
for i in range(0, len(equations)):
e = equations[i]
d, _ = reduced(e, [c])
A[i,j] = d[0]
return A
class Lagrange:
def __init__(self,nsd, order):
self.nsd = nsd
self.order = order
self.compute_basis()
def nbf(self):
return len(self.N)
def compute_basis(self):
order = self.order
nsd = self.nsd
N = []
pol, coeffs, basis = bernstein_space(order, nsd)
points = create_point_set(order, nsd)
equations = []
for p in points:
ex = pol.subs(x, p[0])
if nsd > 1:
ex = ex.subs(y, p[1])
if nsd > 2:
ex = ex.subs(z, p[2])
equations.append(ex )
A = create_matrix(equations, coeffs)
Ainv = A.inv()
b = eye(len(equations))
xx = Ainv*b
for i in range(0,len(equations)):
Ni = pol
for j in range(0,len(coeffs)):
Ni = Ni.subs(coeffs[j], xx[j,i])
N.append(Ni)
self.N = N
def main():
t = ReferenceSimplex(2)
fe = Lagrange(2,2)
u = 0
#compute u = sum_i u_i N_i
us = []
for i in range(0, fe.nbf()):
ui = Symbol("u_%d" % i)
us.append(ui)
u += ui*fe.N[i]
J = zeros(fe.nbf())
for i in range(0, fe.nbf()):
Fi = u*fe.N[i]
print Fi
for j in range(0, fe.nbf()):
uj = us[j]
integrands = diff(Fi, uj)
print integrands
J[j,i] = t.integrate(integrands)
pprint(J)
if __name__ == "__main__":
main()
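# --- Illustrative sketch, not part of fem.py above ---
# The same mass-matrix computation in 1D, small enough to check by hand:
# integrating products of the two linear Lagrange shape functions over the
# reference interval [0, 1] gives the classic matrix [[1/3, 1/6], [1/6, 1/3]].
from sympy import Matrix, Rational, Symbol, integrate
s = Symbol('s')
N1d = [1 - s, s]                                   # linear shape functions
M1d = Matrix(2, 2, lambda i, j: integrate(N1d[i]*N1d[j], (s, 0, 1)))
assert M1d == Matrix([[Rational(1, 3), Rational(1, 6)],
                      [Rational(1, 6), Rational(1, 3)]])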
| flacjacket/sympy | examples/advanced/fem.py | Python | bsd-3-clause | 5,414 |
##########################################################################
#
# Copyright (c) 2008-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import os
import unittest
import maya.cmds
import maya.OpenMaya
import IECore
import IECoreMaya
class TestOp( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self, "Tests stuff",
IECore.IntParameter(
name = "result",
description = "",
defaultValue = 0
)
)
self.parameters().addParameters(
[
IECore.IntParameter(
name = "i",
description = "i",
defaultValue = 1
),
]
)
def doOperation( self, args ) :
return IECore.IntData( 10 )
class FnParameterisedHolderTest( IECoreMaya.TestCase ) :
def test( self ) :
node = maya.cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnParameterisedHolder( node )
self.assertEqual( fnPH.getParameterised(), ( None, "", 0, "" ) )
op = TestOp()
fnPH.setParameterised( op )
parameterisedTuple = fnPH.getParameterised()
self.assert_( parameterisedTuple[0].isSame( op ) )
self.assertEqual( parameterisedTuple[1:], ( "", 0, "" ) )
self.assertEqual( parameterisedTuple[0](), IECore.IntData( 10 ) )
iPlug = fnPH.parameterPlug( op["i"] )
self.assert_( isinstance( iPlug, maya.OpenMaya.MPlug ) )
self.assert_( iPlug.asInt(), 1 )
self.assert_( fnPH.plugParameter( iPlug ).isSame( op["i"] ) )
self.assert_( fnPH.plugParameter( iPlug.name() ).isSame( op["i"] ) )
iPlug.setInt( 2 )
fnPH.setParameterisedValue( op["i"] )
self.assert_( op["i"].getNumericValue(), 2 )
op["i"].setNumericValue( 3 )
fnPH.setNodeValue( op["i"] )
self.assert_( iPlug.asInt(), 3 )
iPlug.setInt( 10 )
fnPH.setParameterisedValues()
self.assert_( op["i"].getNumericValue(), 10 )
op["i"].setNumericValue( 11 )
fnPH.setNodeValues()
self.assert_( iPlug.asInt(), 11 )
def testFullPathName( self ) :
node = maya.cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnParameterisedHolder( node )
self.assertEqual( node, fnPH.fullPathName() )
procedural = maya.cmds.createNode( "ieProceduralHolder" )
fnPH = IECoreMaya.FnParameterisedHolder( procedural )
self.assertEqual( maya.cmds.ls( procedural, long=True )[0], fnPH.fullPathName() )
def testPlugParameterWithNonUniqueNames( self ) :
node = maya.cmds.createNode( "ieProceduralHolder" )
node2 = maya.cmds.createNode( "ieProceduralHolder" )
node = maya.cmds.ls( maya.cmds.rename( node, "iAmNotUnique" ), long=True )[0]
node2 = maya.cmds.ls( maya.cmds.rename( node2, "iAmNotUnique" ), long=True )[0]
fnPH = IECoreMaya.FnProceduralHolder( node )
proc = IECore.ReadProcedural()
fnPH.setParameterised( proc )
self.assert_( fnPH.getParameterised()[0].isSame( proc ) )
fnPH2 = IECoreMaya.FnProceduralHolder( node2 )
proc2 = IECore.ReadProcedural()
fnPH2.setParameterised( proc2 )
self.assert_( fnPH2.getParameterised()[0].isSame( proc2 ) )
# check that each function set references a different node.
self.assert_( fnPH.object()!=fnPH2.object() )
self.assert_( fnPH.fullPathName()!=fnPH2.fullPathName() )
plug = fnPH.parameterPlug( proc["motion"]["blur"] )
plug2 = fnPH2.parameterPlug( proc2["motion"]["blur"] )
self.assertEqual( plug.node(), fnPH.object() )
self.assertEqual( plug2.node(), fnPH2.object() )
self.assertEqual( fnPH.parameterPlugPath( proc["motion"]["blur"] ), "|transform1|iAmNotUnique.parm_motion_blur" )
self.assertEqual( fnPH2.parameterPlugPath( proc2["motion"]["blur"] ), "|transform2|iAmNotUnique.parm_motion_blur" )
self.assert_( maya.cmds.isConnected( "time1.outTime", fnPH.parameterPlugPath( proc["files"]["frame"] ), iuc=True ) )
self.assert_( maya.cmds.isConnected( "time1.outTime", fnPH2.parameterPlugPath( proc2["files"]["frame"] ), iuc=True ) )
def testSetNodeValuesUndo( self ) :
# make an opholder
##########################################################################
node = maya.cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnParameterisedHolder( node )
op = IECore.ClassLoader.defaultOpLoader().load( "parameterTypes", 1 )()
op.parameters().removeParameter( "m" ) # no color4f support in maya
fnPH.setParameterised( op )
# check we have the starting values we expect
###########################################################################
self.assertEqual( op["a"].getNumericValue(), 1 )
aPlug = fnPH.parameterPlug( op["a"] )
self.assertEqual( aPlug.asInt(), 1 )
self.assertEqual( op["b"].getNumericValue(), 2 )
bPlug = fnPH.parameterPlug( op["b"] )
self.assertEqual( bPlug.asFloat(), 2 )
self.assertEqual( op["c"].getNumericValue(), 3 )
cPlug = fnPH.parameterPlug( op["c"] )
self.assertEqual( cPlug.asDouble(), 3 )
self.assertEqual( op["d"].getTypedValue(), "ssss" )
dPlug = fnPH.parameterPlug( op["d"] )
self.assertEqual( dPlug.asString(), "ssss" )
self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 4, -1, 2 ] ) )
ePlug = fnPH.parameterPlug( op["e"] )
fnE = maya.OpenMaya.MFnIntArrayData( ePlug.asMObject() )
self.assertEqual( fnE[0], 4 )
self.assertEqual( fnE[1], -1 )
self.assertEqual( fnE[2], 2 )
self.assertEqual( fnE.length(), 3 )
self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "one", "two", "three" ] ) )
fPlug = fnPH.parameterPlug( op["f"] )
fnF = maya.OpenMaya.MFnStringArrayData( fPlug.asMObject() )
fList = []
fnF.copyTo( fList )
self.assertEqual( fList, [ "one", "two", "three" ] )
self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 1, 2 ) )
gPlug = fnPH.parameterPlug( op["g"] )
self.assertEqual( gPlug.child( 0 ).asFloat(), 1 )
self.assertEqual( gPlug.child( 1 ).asFloat(), 2 )
self.assertEqual( op["h"].getTypedValue(), IECore.V3f( 1, 1, 1 ) )
hPlug = fnPH.parameterPlug( op["h"] )
self.assertEqual( hPlug.child( 0 ).asFloat(), 1 )
self.assertEqual( hPlug.child( 1 ).asFloat(), 1 )
self.assertEqual( hPlug.child( 2 ).asFloat(), 1 )
self.assertEqual( op["q"].getTypedValue(), False )
qPlug = fnPH.parameterPlug( op["q"] )
self.assertEqual( qPlug.asBool(), False )
self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -1 ), IECore.V3f( 1 ) ) )
tPlug = fnPH.parameterPlug( op["t"] )
self.assertEqual( tPlug.child( 0 ).child( 0 ).asFloat(), -1 )
self.assertEqual( tPlug.child( 0 ).child( 1 ).asFloat(), -1 )
self.assertEqual( tPlug.child( 0 ).child( 2 ).asFloat(), -1 )
self.assertEqual( tPlug.child( 1 ).child( 0 ).asFloat(), 1 )
self.assertEqual( tPlug.child( 1 ).child( 1 ).asFloat(), 1 )
self.assertEqual( tPlug.child( 1 ).child( 2 ).asFloat(), 1 )
# change all the node values, making sure undo is enabled
#############################################################################
self.assert_( maya.cmds.undoInfo( query=True, state=True ) )
# change the parameters
op["a"].setNumericValue( 10 )
op["b"].setNumericValue( 100 )
op["c"].setNumericValue( 12 )
op["d"].setTypedValue( "a" )
op["e"].setValue( IECore.IntVectorData( [ 1, 2, 3, 4 ] ) )
op["f"].setValue( IECore.StringVectorData( [ "hi" ] ) )
op["g"].setTypedValue( IECore.V2f( 10, 100 ) )
op["h"].setTypedValue( IECore.V3f( -1, -2, -3 ) )
op["q"].setTypedValue( True )
op["t"].setTypedValue( IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 0 ) ) )
# check they are changed
self.assertEqual( op["a"].getNumericValue(), 10 )
self.assertEqual( op["b"].getNumericValue(), 100 )
self.assertEqual( op["c"].getNumericValue(), 12 )
self.assertEqual( op["d"].getTypedValue(), "a" )
self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 1, 2, 3, 4 ] ) )
self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "hi" ] ) )
self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 10, 100 ) )
self.assertEqual( op["h"].getTypedValue(), IECore.V3f( -1, -2, -3 ) )
self.assertEqual( op["q"].getTypedValue(), True )
self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 0 ) ) )
# push the changes onto the node
fnPH.setNodeValues()
# check the node values are changed
#############################################################################
self.assertEqual( aPlug.asInt(), 10 )
self.assertEqual( bPlug.asFloat(), 100 )
self.assertEqual( cPlug.asDouble(), 12 )
self.assertEqual( dPlug.asString(), "a" )
fnE = maya.OpenMaya.MFnIntArrayData( ePlug.asMObject() )
self.assertEqual( fnE[0], 1 )
self.assertEqual( fnE[1], 2 )
self.assertEqual( fnE[2], 3 )
self.assertEqual( fnE[3], 4 )
self.assertEqual( fnE.length(), 4 )
fnF = maya.OpenMaya.MFnStringArrayData( fPlug.asMObject() )
fList = []
fnF.copyTo( fList )
self.assertEqual( fList, [ "hi" ] )
self.assertEqual( gPlug.child( 0 ).asFloat(), 10 )
self.assertEqual( gPlug.child( 1 ).asFloat(), 100 )
self.assertEqual( hPlug.child( 0 ).asFloat(), -1 )
self.assertEqual( hPlug.child( 1 ).asFloat(), -2 )
self.assertEqual( hPlug.child( 2 ).asFloat(), -3 )
self.assertEqual( qPlug.asBool(), True )
self.assertEqual( tPlug.child( 0 ).child( 0 ).asFloat(), -10 )
self.assertEqual( tPlug.child( 0 ).child( 1 ).asFloat(), -10 )
self.assertEqual( tPlug.child( 0 ).child( 2 ).asFloat(), -10 )
self.assertEqual( tPlug.child( 1 ).child( 0 ).asFloat(), 0 )
self.assertEqual( tPlug.child( 1 ).child( 1 ).asFloat(), 0 )
self.assertEqual( tPlug.child( 1 ).child( 2 ).asFloat(), 0 )
# check that the parameter values are unchanged in the process of
# pushing them to maya
#############################################################################
self.assertEqual( op["a"].getNumericValue(), 10 )
self.assertEqual( op["b"].getNumericValue(), 100 )
self.assertEqual( op["c"].getNumericValue(), 12 )
self.assertEqual( op["d"].getTypedValue(), "a" )
self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 1, 2, 3, 4 ] ) )
self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "hi" ] ) )
self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 10, 100 ) )
self.assertEqual( op["h"].getTypedValue(), IECore.V3f( -1, -2, -3 ) )
self.assertEqual( op["q"].getTypedValue(), True )
self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 0 ) ) )
# undo, and check the node values are back to before
#############################################################################
maya.cmds.undo()
self.assertEqual( aPlug.asInt(), 1 )
self.assertEqual( bPlug.asFloat(), 2 )
self.assertEqual( cPlug.asDouble(), 3 )
self.assertEqual( dPlug.asString(), "ssss" )
fnE = maya.OpenMaya.MFnIntArrayData( ePlug.asMObject() )
self.assertEqual( fnE[0], 4 )
self.assertEqual( fnE[1], -1 )
self.assertEqual( fnE[2], 2 )
self.assertEqual( fnE.length(), 3 )
fnF = maya.OpenMaya.MFnStringArrayData( fPlug.asMObject() )
fList = []
fnF.copyTo( fList )
self.assertEqual( fList, [ "one", "two", "three" ] )
self.assertEqual( gPlug.child( 0 ).asFloat(), 1 )
self.assertEqual( gPlug.child( 1 ).asFloat(), 2 )
self.assertEqual( hPlug.child( 0 ).asFloat(), 1 )
self.assertEqual( hPlug.child( 1 ).asFloat(), 1 )
self.assertEqual( hPlug.child( 2 ).asFloat(), 1 )
self.assertEqual( qPlug.asBool(), False )
self.assertEqual( tPlug.child( 0 ).child( 0 ).asFloat(), -1 )
self.assertEqual( tPlug.child( 0 ).child( 1 ).asFloat(), -1 )
self.assertEqual( tPlug.child( 0 ).child( 2 ).asFloat(), -1 )
self.assertEqual( tPlug.child( 1 ).child( 0 ).asFloat(), 1 )
self.assertEqual( tPlug.child( 1 ).child( 1 ).asFloat(), 1 )
self.assertEqual( tPlug.child( 1 ).child( 2 ).asFloat(), 1 )
# check that the parameter values are unchanged in the undo process
#############################################################################
self.assertEqual( op["a"].getNumericValue(), 10 )
self.assertEqual( op["b"].getNumericValue(), 100 )
self.assertEqual( op["c"].getNumericValue(), 12 )
self.assertEqual( op["d"].getTypedValue(), "a" )
self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 1, 2, 3, 4 ] ) )
self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "hi" ] ) )
self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 10, 100 ) )
self.assertEqual( op["h"].getTypedValue(), IECore.V3f( -1, -2, -3 ) )
self.assertEqual( op["q"].getTypedValue(), True )
self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 0 ) ) )
# redo, and check they are changed again
#############################################################################
maya.cmds.redo()
self.assertEqual( aPlug.asInt(), 10 )
self.assertEqual( bPlug.asFloat(), 100 )
self.assertEqual( cPlug.asDouble(), 12 )
self.assertEqual( dPlug.asString(), "a" )
fnE = maya.OpenMaya.MFnIntArrayData( ePlug.asMObject() )
self.assertEqual( fnE[0], 1 )
self.assertEqual( fnE[1], 2 )
self.assertEqual( fnE[2], 3 )
self.assertEqual( fnE[3], 4 )
self.assertEqual( fnE.length(), 4 )
fnF = maya.OpenMaya.MFnStringArrayData( fPlug.asMObject() )
fList = []
fnF.copyTo( fList )
self.assertEqual( fList, [ "hi" ] )
self.assertEqual( gPlug.child( 0 ).asFloat(), 10 )
self.assertEqual( gPlug.child( 1 ).asFloat(), 100 )
self.assertEqual( hPlug.child( 0 ).asFloat(), -1 )
self.assertEqual( hPlug.child( 1 ).asFloat(), -2 )
self.assertEqual( hPlug.child( 2 ).asFloat(), -3 )
self.assertEqual( qPlug.asBool(), True )
self.assertEqual( tPlug.child( 0 ).child( 0 ).asFloat(), -10 )
self.assertEqual( tPlug.child( 0 ).child( 1 ).asFloat(), -10 )
self.assertEqual( tPlug.child( 0 ).child( 2 ).asFloat(), -10 )
self.assertEqual( tPlug.child( 1 ).child( 0 ).asFloat(), 0 )
self.assertEqual( tPlug.child( 1 ).child( 1 ).asFloat(), 0 )
self.assertEqual( tPlug.child( 1 ).child( 2 ).asFloat(), 0 )
# check that the parameter values are unchanged in the redo process
#############################################################################
self.assertEqual( op["a"].getNumericValue(), 10 )
self.assertEqual( op["b"].getNumericValue(), 100 )
self.assertEqual( op["c"].getNumericValue(), 12 )
self.assertEqual( op["d"].getTypedValue(), "a" )
self.assertEqual( op["e"].getValue(), IECore.IntVectorData( [ 1, 2, 3, 4 ] ) )
self.assertEqual( op["f"].getValue(), IECore.StringVectorData( [ "hi" ] ) )
self.assertEqual( op["g"].getTypedValue(), IECore.V2f( 10, 100 ) )
self.assertEqual( op["h"].getTypedValue(), IECore.V3f( -1, -2, -3 ) )
self.assertEqual( op["q"].getTypedValue(), True )
self.assertEqual( op["t"].getTypedValue(), IECore.Box3f( IECore.V3f( -10 ), IECore.V3f( 0 ) ) )
def testSetNodeValueUndo( self ) :
p = IECore.Parameterised( "" )
p.parameters().addParameters(
[
IECore.IntParameter(
"i",
"",
1
),
IECore.FloatParameter(
"f",
"",
2
)
]
)
node = maya.cmds.createNode( "ieParameterisedHolderLocator" )
fnOH = IECoreMaya.FnParameterisedHolder( node )
fnOH.setParameterised( p )
# check the start values are as expected
self.assertEqual( p["i"].getNumericValue(), 1 )
self.assertEqual( p["f"].getNumericValue(), 2 )
self.assertEqual( fnOH.parameterPlug( p["i"] ).asInt(), 1 )
self.assertEqual( fnOH.parameterPlug( p["f"] ).asInt(), 2 )
# change both parameters
self.assert_( maya.cmds.undoInfo( query=True, state=True ) )
p["i"].setNumericValue( 10 )
p["f"].setNumericValue( 11 )
self.assertEqual( p["i"].getNumericValue(), 10 )
self.assertEqual( p["f"].getNumericValue(), 11 )
self.assertEqual( fnOH.parameterPlug( p["i"] ).asInt(), 1 )
self.assertEqual( fnOH.parameterPlug( p["f"] ).asInt(), 2 )
# but push only one into maya
fnOH.setNodeValue( p["i"] )
# and check we see what we expect
self.assertEqual( p["i"].getNumericValue(), 10 )
self.assertEqual( p["f"].getNumericValue(), 11 )
self.assertEqual( fnOH.parameterPlug( p["i"] ).asInt(), 10 )
self.assertEqual( fnOH.parameterPlug( p["f"] ).asInt(), 2 )
# undo and check
maya.cmds.undo()
self.assertEqual( p["i"].getNumericValue(), 10 )
self.assertEqual( p["f"].getNumericValue(), 11 )
self.assertEqual( fnOH.parameterPlug( p["i"] ).asInt(), 1 )
self.assertEqual( fnOH.parameterPlug( p["f"] ).asInt(), 2 )
# redo and check
maya.cmds.redo()
self.assertEqual( p["i"].getNumericValue(), 10 )
self.assertEqual( p["f"].getNumericValue(), 11 )
self.assertEqual( fnOH.parameterPlug( p["i"] ).asInt(), 10 )
self.assertEqual( fnOH.parameterPlug( p["f"] ).asInt(), 2 )
def testExcessReferenceEdits( self ) :
IECoreMaya.FnOpHolder.create( "testOp", "maths/multiply", 2 )
# Save the scene out so we can reference it
maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "referenceEditCounts.ma" ) )
referenceScene = maya.cmds.file( force = True, type = "mayaAscii", save = True )
# New scene, and read it in.
maya.cmds.file( new = True, force = True )
maya.cmds.file( referenceScene, reference = True, namespace = "ns1" )
# Check there are no reference edits
fnOH = IECoreMaya.FnOpHolder( 'ns1:testOp' )
op = fnOH.getOp()
aPath = fnOH.parameterPlugPath( op["a"] )
bPath = fnOH.parameterPlugPath( op["b"] )
self.assertEqual( maya.cmds.getAttr( aPath ), 1 )
self.assertEqual( maya.cmds.getAttr( bPath ), 2 )
self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 )
# set values, but with no changes
with fnOH.parameterModificationContext() :
op["a"].setNumericValue( 1 )
op["b"].setNumericValue( 2 )
# Check the values are the same, and there are still no reference edits
self.assertEqual( maya.cmds.getAttr( aPath ), 1 )
self.assertEqual( maya.cmds.getAttr( bPath ), 2 )
self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 )
# change a value to a genuinely new value
with fnOH.parameterModificationContext() :
op["a"].setNumericValue( 100 )
# Check the maya value is updated and there is 1 reference edit
self.assertEqual( maya.cmds.getAttr( aPath ), 100 )
self.assertEqual( maya.cmds.getAttr( bPath ), 2 )
self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 1 )
# Undo and check there is still 1 reference edit. Ideally there would be none but
# maya isn't that clever.
maya.cmds.undo()
self.assertEqual( maya.cmds.getAttr( aPath ), 1 )
self.assertEqual( maya.cmds.getAttr( bPath ), 2 )
self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 1 )
def testExcessClassParameterReferenceEdits( self ) :
# Save a scene with a ClassParameter in it
fnOH = IECoreMaya.FnOpHolder.create( "node", "classParameterTest", 1 )
op = fnOH.getOp()
with fnOH.parameterModificationContext() :
op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" )
self.assertEqual( op["cp"].getClass( True )[1:], ( "maths/multiply", 1, "IECORE_OP_PATHS" ) )
maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "referenceEditCounts.ma" ) )
referenceScene = maya.cmds.file( force = True, type = "mayaAscii", save = True )
# And reference it back in to a new scene
maya.cmds.file( new = True, force = True )
maya.cmds.file( referenceScene, reference = True, namespace = "ns1" )
self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 )
# Make a modification which does nothing and check that there are no reference edits
fnOH = IECoreMaya.FnOpHolder( "ns1:node" )
with fnOH.parameterModificationContext() :
pass
self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 )
# Make a modification which happens to set things to the values they're already at and
# check that there are no reference edits
op = fnOH.getOp()
with fnOH.parameterModificationContext() :
op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" )
self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 )
def testExcessClassVectorParameterReferenceEdits( self ) :
# Save a scene with a ClassParameter in it
fnOH = IECoreMaya.FnOpHolder.create( "node", "classVectorParameterTest", 1 )
op = fnOH.getOp()
with fnOH.parameterModificationContext() :
op["cv"].setClasses(
[
( "mult", "maths/multiply", 1 ),
( "coIO", "compoundObjectInOut", 1 ),
]
)
maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "referenceEditCounts.ma" ) )
referenceScene = maya.cmds.file( force = True, type = "mayaAscii", save = True )
# And reference it back in to a new scene
maya.cmds.file( new = True, force = True )
maya.cmds.file( referenceScene, reference = True, namespace = "ns1" )
self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 )
# Make a modification which does nothing and check that there are no reference edits
fnOH = IECoreMaya.FnOpHolder( "ns1:node" )
with fnOH.parameterModificationContext() :
pass
self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 )
# Make a modification which happens to set things to the values they're already at and
# check that there are no reference edits
with fnOH.parameterModificationContext() :
op["cv"].setClasses(
[
( "mult", "maths/multiply", 1 ),
( "coIO", "compoundObjectInOut", 1 ),
]
)
self.assertEqual( len(maya.cmds.referenceQuery( referenceScene, editStrings=True )), 0 )
def testSetParameterValuesUsingContext( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "testOp", "maths/multiply", 2 )
op = fnOH.getOp()
aPath = fnOH.parameterPlugPath( op["a"] )
self.assertEqual( maya.cmds.getAttr( aPath ), 1 )
with fnOH.parameterModificationContext() :
op["a"].setNumericValue( 10023 )
self.assertEqual( maya.cmds.getAttr( aPath ), 10023 )
maya.cmds.undo()
self.assertEqual( maya.cmds.getAttr( aPath ), 1 )
maya.cmds.redo()
self.assertEqual( maya.cmds.getAttr( aPath ), 10023 )
def testSetParameterValuesAndClassesUsingContext( self ) :
fnOH = IECoreMaya.FnOpHolder.create( "node", "classParameterTest", 1 )
with fnOH.parameterModificationContext() as op :
op["cp"].setClass( "maths/multiply", 1, "IECORE_OP_PATHS" )
op["cp"]["a"].setNumericValue( 10101 )
aPath = fnOH.parameterPlugPath( op["cp"]["a"] )
self.assertEqual( maya.cmds.getAttr( aPath ), 10101 )
maya.cmds.undo()
self.assertEqual( op["cp"].getClass(), None )
maya.cmds.redo()
self.assertEqual( op["cp"].getClass( True )[1:], ( "maths/multiply", 1, "IECORE_OP_PATHS" ) )
self.assertEqual( maya.cmds.getAttr( aPath ), 10101 )
def testBoxDefaultValue( self ) :
node = maya.cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnParameterisedHolder( node )
op = IECore.ClassLoader.defaultOpLoader().load( "parameterTypes", 1 )()
op.parameters().removeParameter( "m" ) # no color4f support in maya
fnPH.setParameterised( op )
node, plug = fnPH.parameterPlugPath( op["s"] ).split( "." )
self.assertEqual( maya.cmds.attributeQuery( plug + "Min", listDefault=True, node=node ), [ -1.0, -1.0 ] )
self.assertEqual( maya.cmds.attributeQuery( plug + "Max", listDefault=True, node=node ), [ 1.0, 1.0 ] )
node, plug = fnPH.parameterPlugPath( op["t"] ).split( "." )
self.assertEqual( maya.cmds.attributeQuery( plug + "Min", listDefault=True, node=node ), [ -1.0, -1.0, -1.0 ] )
self.assertEqual( maya.cmds.attributeQuery( plug + "Max", listDefault=True, node=node ), [ 1.0, 1.0, 1.0 ] )
def testArrayPlugCreation( self ) :
op = IECore.Op( 'test op', IECore.IntParameter( 'result', '', 0 ) )
op.parameters().addParameters( [
IECore.V3fVectorParameter( 'v3fVector', '', IECore.V3fVectorData() ),
IECore.V3dVectorParameter( 'v3dVector', '', IECore.V3dVectorData() ),
IECore.StringVectorParameter( 'stringVector', '', IECore.StringVectorData() ),
IECore.DoubleVectorParameter( 'doubleVector', '', IECore.DoubleVectorData() ),
IECore.FloatVectorParameter( 'floatVector', '', IECore.FloatVectorData() ),
IECore.IntVectorParameter( 'intVector', '', IECore.IntVectorData() ),
IECore.BoolVectorParameter( 'boolVector', '', IECore.BoolVectorData() ),
IECore.M44fVectorParameter( 'm44fVector', '', IECore.M44fVectorData() ),
IECore.M44dVectorParameter( 'm44dVector', '', IECore.M44dVectorData() ),
] )
node = maya.cmds.createNode( 'ieOpHolderNode' )
fnPH = IECoreMaya.FnParameterisedHolder( node )
self.assert_( not maya.cmds.objExists( node+'.parm_v3fVector' ) )
self.assert_( not maya.cmds.objExists( node+'.parm_v3dVector' ) )
self.assert_( not maya.cmds.objExists( node+'.parm_stringVector' ) )
self.assert_( not maya.cmds.objExists( node+'.parm_doubleVector' ) )
self.assert_( not maya.cmds.objExists( node+'.parm_floatVector' ) )
self.assert_( not maya.cmds.objExists( node+'.parm_intVector' ) )
self.assert_( not maya.cmds.objExists( node+'.parm_boolVector' ) )
self.assert_( not maya.cmds.objExists( node+'.parm_m44fVector' ) )
self.assert_( not maya.cmds.objExists( node+'.parm_m44dVector' ) )
fnPH.setParameterised( op )
self.assert_( maya.cmds.objExists( node+'.parm_v3fVector' ) )
self.assert_( maya.cmds.objExists( node+'.parm_v3dVector' ) )
self.assert_( maya.cmds.objExists( node+'.parm_stringVector' ) )
self.assert_( maya.cmds.objExists( node+'.parm_doubleVector' ) )
self.assert_( maya.cmds.objExists( node+'.parm_floatVector' ) )
self.assert_( maya.cmds.objExists( node+'.parm_intVector' ) )
self.assert_( maya.cmds.objExists( node+'.parm_boolVector' ) )
self.assert_( maya.cmds.objExists( node+'.parm_m44fVector' ) )
self.assert_( maya.cmds.objExists( node+'.parm_m44dVector' ) )
self.assertEqual( maya.cmds.getAttr( node+'.parm_v3fVector', type=True ), 'vectorArray' )
self.assertEqual( maya.cmds.getAttr( node+'.parm_v3dVector', type=True ), 'vectorArray' )
self.assertEqual( maya.cmds.getAttr( node+'.parm_stringVector', type=True ), 'stringArray' )
self.assertEqual( maya.cmds.getAttr( node+'.parm_doubleVector', type=True ), 'doubleArray' )
self.assertEqual( maya.cmds.getAttr( node+'.parm_floatVector', type=True ), 'doubleArray' )
self.assertEqual( maya.cmds.getAttr( node+'.parm_intVector', type=True ), 'Int32Array' )
self.assertEqual( maya.cmds.getAttr( node+'.parm_boolVector', type=True ), 'Int32Array' )
self.assertEqual( maya.cmds.getAttr( node+'.parm_m44fVector', type=True ), 'doubleArray' )
self.assertEqual( maya.cmds.getAttr( node+'.parm_m44dVector', type=True ), 'doubleArray' )
def testMatrixVectorPlugs( self ) :
m44fVector = IECore.M44fVectorData( [ IECore.M44f( 1 ), IECore.M44f( 2 ), IECore.M44f( 3 ) ] )
m44dVector = IECore.M44dVectorData( [ IECore.M44d( 1 ), IECore.M44d( 2 ), IECore.M44d( 3 ) ] )
reverseM44fVector = IECore.M44fVectorData( [ IECore.M44f( 3 ), IECore.M44f( 2 ), IECore.M44f( 1 ) ] )
reverseM44dVector = IECore.M44dVectorData( [ IECore.M44d( 3 ), IECore.M44d( 2 ), IECore.M44d( 1 ) ] )
mayaArray = []
for i in range( 0, 3 ) :
for j in range( 0, 16 ) :
mayaArray.append( i+1 )
reverseMayaArray = list(mayaArray)
reverseMayaArray.reverse()
op = IECore.Op( 'test op', IECore.IntParameter( 'result', '', 0 ) )
op.parameters().addParameters( [
IECore.M44fVectorParameter( 'm44fVector', '', IECore.M44fVectorData() ),
IECore.M44dVectorParameter( 'm44dVector', '', IECore.M44dVectorData() ),
] )
node = maya.cmds.createNode( 'ieOpHolderNode' )
fnPH = IECoreMaya.FnParameterisedHolder( node )
fnPH.setParameterised( op )
# set from cortex to maya
self.assertNotEqual( mayaArray, maya.cmds.getAttr( node+'.parm_m44fVector' ) )
self.assertNotEqual( mayaArray, maya.cmds.getAttr( node+'.parm_m44dVector' ) )
fnPH.getParameterised()[0].parameters()['m44fVector'].setValue( m44fVector )
fnPH.getParameterised()[0].parameters()['m44dVector'].setValue( m44dVector )
fnPH.setNodeValues()
self.assertEqual( mayaArray, maya.cmds.getAttr( node+'.parm_m44fVector' ) )
self.assertEqual( mayaArray, maya.cmds.getAttr( node+'.parm_m44dVector' ) )
# set from maya to cortex
self.assertNotEqual( reverseM44fVector, fnPH.getParameterised()[0].parameters()['m44fVector'].getValue() )
self.assertNotEqual( reverseM44dVector, fnPH.getParameterised()[0].parameters()['m44dVector'].getValue() )
maya.cmds.setAttr( node+'.parm_m44fVector', reverseMayaArray, type="doubleArray" )
maya.cmds.setAttr( node+'.parm_m44dVector', reverseMayaArray, type="doubleArray" )
fnPH.setParameterisedValues()
self.assertEqual( reverseM44fVector, fnPH.getParameterised()[0].parameters()['m44fVector'].getValue() )
self.assertEqual( reverseM44dVector, fnPH.getParameterised()[0].parameters()['m44dVector'].getValue() )
# set to incorrect length from maya
maya.cmds.setAttr( node+'.parm_m44fVector', [0,1,2], type="doubleArray" )
maya.cmds.setAttr( node+'.parm_m44dVector', [0,1,2], type="doubleArray" )
fnPH.setParameterisedValues()
self.assertEqual( None, fnPH.getParameterised()[0].parameters()['m44fVector'].getValue() )
self.assertEqual( None, fnPH.getParameterised()[0].parameters()['m44dVector'].getValue() )
def testResultAttrSaveLoad( self ) :
node = maya.cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnOpHolder( node )
fnPH.setOp( "floatParameter" )
self.assertNotEqual( maya.cmds.getAttr( node + ".parm_f" ), 50.5 )
self.assertNotEqual( maya.cmds.getAttr( node + ".result" ), 50.5 )
maya.cmds.setAttr( node + ".parm_f", 50.5 )
self.assertEqual( maya.cmds.getAttr( node + ".parm_f" ), 50.5 )
self.assertEqual( maya.cmds.getAttr( node + ".result" ), 50.5 )
maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "resultAttrLoadTest.ma" ) )
testScene = maya.cmds.file( force = True, type = "mayaAscii", save = True )
maya.cmds.file( testScene, f = True, o = True )
self.assertEqual( maya.cmds.getAttr( node + ".parm_f" ), 50.5 )
self.assertEqual( maya.cmds.getAttr( node + ".result" ), 50.5 )
@unittest.skipIf( maya.OpenMaya.MGlobal.apiVersion() < 201600, "Inactive node state causes a seg fault prior to Maya 2016" )
def testResultAttrSaveLoadMeshConnections( self ) :
box = maya.cmds.listRelatives( maya.cmds.polyCube(), shapes=True )[0]
torus = maya.cmds.listRelatives( maya.cmds.polyTorus(), shapes=True )[0]
node = maya.cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnOpHolder( node )
fnPH.setOp( "meshMerge" )
maya.cmds.connectAttr( box + ".outMesh", node + ".parm_input" )
maya.cmds.connectAttr( torus + ".outMesh", node + ".parm_mesh" )
mesh = maya.cmds.createNode( "mesh" )
maya.cmds.connectAttr( node + ".result", mesh + ".inMesh" )
quads = maya.cmds.polyQuad( mesh )[0]
joint = maya.cmds.createNode( "joint" )
cluster = maya.cmds.skinCluster( mesh, joint )
fnMesh = maya.OpenMaya.MFnMesh( IECoreMaya.dependencyNodeFromString( mesh ) )
self.assertEqual( fnMesh.numVertices(), 408 )
self.assertEqual( fnMesh.numPolygons(), 406 )
maya.cmds.file( rename = os.path.join( os.getcwd(), "test", "IECoreMaya", "resultAttrLoadTest.ma" ) )
testScene = maya.cmds.file( force = True, type = "mayaAscii", save = True )
maya.cmds.file( testScene, f = True, o = True )
fnMesh = maya.OpenMaya.MFnMesh( IECoreMaya.dependencyNodeFromString( mesh ) )
self.assertEqual( fnMesh.numVertices(), 408 )
self.assertEqual( fnMesh.numPolygons(), 406 )
self.assertEqual( maya.cmds.getAttr( quads + ".nodeState" ), 0 )
def testParameterPlugForMissingPlug( self ) :
## Make sure that null plugs are returned from the parameterPlug() method
# if no plug exists.
node = maya.cmds.createNode( "ieOpHolderNode" )
fnPH = IECoreMaya.FnOpHolder( node )
fnPH.setOp( "floatParameter" )
op = fnPH.getOp()
plug = fnPH.parameterPlug( op.parameters() )
self.failUnless( isinstance( plug, maya.OpenMaya.MPlug ) )
self.failUnless( plug.isNull() )
def testLsMethods( self ) :
# create a couple of holders:
opHolderNode = maya.cmds.createNode( "ieOpHolderNode" )
fnOH = IECoreMaya.FnOpHolder( opHolderNode )
fnOH.setOp( "floatParameter" )
converterHolderNode = maya.cmds.createNode( "ieConverterHolder" )
fnCH = IECoreMaya.FnConverterHolder( converterHolderNode )
#fnCH.setOp( "floatParameter" )
node = maya.cmds.createNode( "ieProceduralHolder" )
node2 = maya.cmds.createNode( "ieProceduralHolder" )
fnPH = IECoreMaya.FnProceduralHolder( node )
proc = IECore.ReadProcedural()
fnPH.setParameterised( proc )
fnPH2 = IECoreMaya.FnProceduralHolder( node2 )
# do an ls on the op holders: should only be one
opHolders = IECoreMaya.FnOpHolder.ls()
self.assertEqual( len( opHolders ), 1 )
self.failUnless( isinstance( opHolders[0], IECoreMaya.FnOpHolder ) )
self.assertEqual( opHolders[0].fullPathName(), opHolderNode )
# do an ls on the procedural holders: should be two
self.assertEqual( len( IECoreMaya.FnProceduralHolder.ls() ), 2 )
# do an ls on the procedural holders containing IECore.ReadProcedurals: should be one
self.assertEqual( len( IECoreMaya.FnProceduralHolder.ls( classType=IECore.ReadProcedural ) ), 1 )
# find full path name of node holding ReadProcedural, and check it's the same as the one returned by ls:
node = maya.cmds.ls( node, l=True )[0]
self.assertEqual( IECoreMaya.FnProceduralHolder.ls( classType=IECore.ReadProcedural )[0].fullPathName(), node )
# do an ls on the converter holders, this time just returning node names:
converterHolders = IECoreMaya.FnConverterHolder.ls( fnSets=False )
self.assertEqual( len( converterHolders ), 1 )
self.assertEqual( converterHolders[0], converterHolderNode )
def tearDown( self ) :
for f in [
"test/IECoreMaya/referenceEditCounts.ma",
"test/IECoreMaya/resultAttrLoadTest.ma",
"test/IECoreMaya/resultGetParameterisedTest.ma",
] :
if os.path.exists( f ) :
os.remove( f )
if __name__ == "__main__":
IECoreMaya.TestProgram( plugins = [ "ieCore" ] )
| goddardl/cortex | test/IECoreMaya/FnParameterisedHolderTest.py | Python | bsd-3-clause | 35,854 |
#!/usr/bin/env python
# encoding: utf-8
import unittest
import os.path as p, sys; sys.path.append(p.join(p.dirname(__file__), ".."))
from _diff import diff, guess_edit
from geometry import Position
def transform(a, cmds):
buf = a.split("\n")
for cmd in cmds:
ctype, line, col, char = cmd
if ctype == "D":
if char != '\n':
buf[line] = buf[line][:col] + buf[line][col+len(char):]
else:
buf[line] = buf[line] + buf[line+1]
del buf[line+1]
elif ctype == "I":
buf[line] = buf[line][:col] + char + buf[line][col:]
buf = '\n'.join(buf).split('\n')
return '\n'.join(buf)
import unittest
# Test Guessing {{{
class _BaseGuessing(object):
def runTest(self):
rv, es = guess_edit(self.initial_line, self.a, self.b, Position(*self.ppos), Position(*self.pos))
self.assertEqual(rv, True)
self.assertEqual(self.wanted, es)
class TestGuessing_Noop0(_BaseGuessing, unittest.TestCase):
a, b = [], []
initial_line = 0
ppos, pos = (0, 6), (0, 7)
wanted = ()
class TestGuessing_InsertOneChar(_BaseGuessing, unittest.TestCase):
a, b = ["Hello World"], ["Hello World"]
initial_line = 0
ppos, pos = (0, 6), (0, 7)
wanted = (
("I", 0, 6, " "),
)
class TestGuessing_InsertOneChar1(_BaseGuessing, unittest.TestCase):
a, b = ["Hello World"], ["Hello World"]
initial_line = 0
ppos, pos = (0, 7), (0, 8)
wanted = (
("I", 0, 7, " "),
)
class TestGuessing_BackspaceOneChar(_BaseGuessing, unittest.TestCase):
a, b = ["Hello World"], ["Hello World"]
initial_line = 0
ppos, pos = (0, 7), (0, 6)
wanted = (
("D", 0, 6, " "),
)
class TestGuessing_DeleteOneChar(_BaseGuessing, unittest.TestCase):
a, b = ["Hello World"], ["Hello World"]
initial_line = 0
ppos, pos = (0, 5), (0, 5)
wanted = (
("D", 0, 5, " "),
)
# End: Test Guessing }}}
class _Base(object):
def runTest(self):
es = diff(self.a, self.b)
tr = transform(self.a, es)
self.assertEqual(self.b, tr)
self.assertEqual(self.wanted, es)
class TestEmptyString(_Base, unittest.TestCase):
a, b = "", ""
wanted = ()
class TestAllMatch(_Base, unittest.TestCase):
a, b = "abcdef", "abcdef"
wanted = ()
class TestLotsaNewlines(_Base, unittest.TestCase):
a, b = "Hello", "Hello\nWorld\nWorld\nWorld"
wanted = (
("I", 0, 5, "\n"),
("I", 1, 0, "World"),
("I", 1, 5, "\n"),
("I", 2, 0, "World"),
("I", 2, 5, "\n"),
("I", 3, 0, "World"),
)
class TestCrash(_Base, unittest.TestCase):
a = 'hallo Blah mitte=sdfdsfsd\nhallo kjsdhfjksdhfkjhsdfkh mittekjshdkfhkhsdfdsf'
b = 'hallo Blah mitte=sdfdsfsd\nhallo b mittekjshdkfhkhsdfdsf'
wanted = (
("D", 1, 6, "kjsdhfjksdhfkjhsdfkh"),
("I", 1, 6, "b"),
)
class TestRealLife(_Base, unittest.TestCase):
a = 'hallo End Beginning'
b = 'hallo End t'
wanted = (
("D", 0, 10, "Beginning"),
("I", 0, 10, "t"),
)
class TestRealLife1(_Base, unittest.TestCase):
a = 'Vorne hallo Hinten'
b = 'Vorne hallo Hinten'
wanted = (
("I", 0, 11, " "),
)
class TestWithNewline(_Base, unittest.TestCase):
a = 'First Line\nSecond Line'
b = 'n'
wanted = (
("D", 0, 0, "First Line"),
("D", 0, 0, "\n"),
("D", 0, 0, "Second Line"),
("I", 0, 0, "n"),
)
class TestCheapDelete(_Base, unittest.TestCase):
a = 'Vorne hallo Hinten'
b = 'Vorne Hinten'
wanted = (
("D", 0, 5, " hallo"),
)
class TestNoSubstring(_Base, unittest.TestCase):
a,b = "abc", "def"
wanted = (
("D", 0, 0, "abc"),
("I", 0, 0, "def"),
)
class TestCommonCharacters(_Base, unittest.TestCase):
a,b = "hasomelongertextbl", "hol"
wanted = (
("D", 0, 1, "asomelongertextb"),
("I", 0, 1, "o"),
)
class TestUltiSnipsProblem(_Base, unittest.TestCase):
a = "this is it this is it this is it"
b = "this is it a this is it"
wanted = (
("D", 0, 11, "this is it"),
("I", 0, 11, "a"),
)
class MatchIsTooCheap(_Base, unittest.TestCase):
a = "stdin.h"
b = "s"
wanted = (
("D", 0, 1, "tdin.h"),
)
class MultiLine(_Base, unittest.TestCase):
a = "hi first line\nsecond line first line\nsecond line world"
b = "hi first line\nsecond line k world"
wanted = (
("D", 1, 12, "first line"),
("D", 1, 12, "\n"),
("D", 1, 12, "second line"),
("I", 1, 12, "k"),
)
if __name__ == '__main__':
unittest.main()
# k = TestEditScript()
# unittest.TextTestRunner().run(k)
|
wholland/env
|
vim/runtime/bundle/ultisnips/plugin/UltiSnips/tests/test_diff.py
|
Python
|
mit
| 4,789
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import threading
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse # type: ignore
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
# pylint: disable=unused-import
from typing import Dict
from .http_challenge import HttpChallenge
_cache = {} # type: Dict[str, HttpChallenge]
_lock = threading.Lock()
def get_challenge_for_url(url):
""" Gets the challenge for the cached URL.
:param url: the URL the challenge is cached for.
:rtype: HttpBearerChallenge """
if not url:
raise ValueError("URL cannot be None")
key = _get_cache_key(url)
with _lock:
return _cache.get(key)
def _get_cache_key(url):
"""Use the URL's netloc as cache key except when the URL specifies the default port for its scheme. In that case
use the netloc without the port. That is to say, https://foo.bar and https://foo.bar:443 are considered equivalent.
This equivalency prevents an unnecessary challenge when using Key Vault's paging API. The Key Vault client doesn't
specify ports, but Key Vault's next page links do, so a redundant challenge would otherwise be executed when the
client requests the next page."""
parsed = parse.urlparse(url)
if parsed.scheme == "https" and parsed.port == 443:
return parsed.netloc[:-4]
return parsed.netloc
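# Illustrative sketch (added as an aside, not part of the original module): the
# normalization above makes the default-port and port-less spellings of a host
# share one cache entry; the vault hostname used here is hypothetical.
def _cache_key_normalization_demo():
    with_port = _get_cache_key("https://myvault.vault.azure.net:443/secrets")
    without_port = _get_cache_key("https://myvault.vault.azure.net/secrets")
    assert with_port == without_port == "myvault.vault.azure.net"
    return with_port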
def remove_challenge_for_url(url):
""" Removes the cached challenge for the specified URL.
:param url: the URL for which to remove the cached challenge """
if not url:
raise ValueError("URL cannot be empty")
url = parse.urlparse(url)
with _lock:
del _cache[url.netloc]
def set_challenge_for_url(url, challenge):
""" Caches the challenge for the specified URL.
:param url: the URL for which to cache the challenge
:param challenge: the challenge to cache """
if not url:
raise ValueError("URL cannot be empty")
if not challenge:
raise ValueError("Challenge cannot be empty")
src_url = parse.urlparse(url)
if src_url.netloc != challenge.source_authority:
raise ValueError("Source URL and Challenge URL do not match")
with _lock:
_cache[src_url.netloc] = challenge
def clear():
""" Clears the cache. """
with _lock:
_cache.clear()
|
Azure/azure-sdk-for-python
|
sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_shared/http_challenge_cache.py
|
Python
|
mit
| 2,520
|
# Time: O(n)
# Space: O(1)
# Suppose you have a long flowerbed in which some of the plots are planted and some are not.
# However, flowers cannot be planted in adjacent plots - they would compete for water
# and both would die.
#
# Given a flowerbed (represented as an array containing 0 and 1,
# where 0 means empty and 1 means not empty), and a number n,
# return whether n new flowers can be planted in it without violating the no-adjacent-flowers rule.
#
# Example 1:
# Input: flowerbed = [1,0,0,0,1], n = 1
# Output: True
# Example 2:
# Input: flowerbed = [1,0,0,0,1], n = 2
# Output: False
# Note:
# The input array won't violate the no-adjacent-flowers rule.
# The input array size is in the range of [1, 20000].
# n is a non-negative integer which won't exceed the input array size.
class Solution(object):
def canPlaceFlowers(self, flowerbed, n):
"""
:type flowerbed: List[int]
:type n: int
:rtype: bool
"""
for i in xrange(len(flowerbed)):
if flowerbed[i] == 0 and (i == 0 or flowerbed[i-1] == 0) and \
(i == len(flowerbed)-1 or flowerbed[i+1] == 0):
flowerbed[i] = 1
n -= 1
if n <= 0:
return True
        return n <= 0  # also covers n == 0 when no empty plot remains
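# Quick manual check (illustration only; mirrors the two examples in the header comment).
if __name__ == "__main__":
    solver = Solution()
    print(solver.canPlaceFlowers([1, 0, 0, 0, 1], 1))  # expected: True
    print(solver.canPlaceFlowers([1, 0, 0, 0, 1], 2))  # expected: False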
|
yiwen-luo/LeetCode
|
Python/can-place-flowers.py
|
Python
|
mit
| 1,263
|
from flask import render_template
from . import auth
@auth.route('/login')
def login():
return render_template('auth/login.html')
|
baojiwei/flasky
|
app/auth/views.py
|
Python
|
mit
| 136
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# mediawiki-utilities documentation build configuration file, created by
# sphinx-quickstart on Thu Apr 10 17:31:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
import mw
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'mediawiki-utilities'
copyright = '2014, Aaron Halfaker'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mw.__version__
# The full version, including alpha/beta/rc tags.
release = mw.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mediawiki-utilitiesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'mediawiki-utilities.tex', 'mediawiki-utilities Documentation',
'Aaron Halfaker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mediawiki-utilities', 'mediawiki-utilities Documentation',
['Aaron Halfaker'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mediawiki-utilities', 'mediawiki-utilities Documentation',
'Aaron Halfaker', 'mediawiki-utilities', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mediawiki-utilities/python-mediawiki-utilities
|
doc/conf.py
|
Python
|
mit
| 8,467
|
##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing scipy, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.fortranpythonpackage import FortranPythonPackage
from easybuild.easyblocks.generic.pythonpackage import det_pylibdir
import easybuild.tools.toolchain as toolchain
class EB_scipy(FortranPythonPackage):
"""Support for installing the scipy Python package as part of a Python installation."""
def __init__(self, *args, **kwargs):
"""Set scipy-specific test command."""
super(EB_scipy, self).__init__(*args, **kwargs)
self.testinstall = True
self.testcmd = "cd .. && %(python)s -c 'import numpy; import scipy; scipy.test(verbose=2)'"
def configure_step(self):
"""Custom configure step for scipy: set extra installation options when needed."""
super(EB_scipy, self).configure_step()
if LooseVersion(self.version) >= LooseVersion('0.13'):
# in recent scipy versions, additional compilation is done in the install step,
# which requires unsetting $LDFLAGS
if self.toolchain.comp_family() in [toolchain.GCC, toolchain.CLANGGCC]: # @UndefinedVariable
self.cfg.update('preinstallopts', "unset LDFLAGS && ")
def sanity_check_step(self, *args, **kwargs):
"""Custom sanity check for scipy."""
        # can't use self.pylibdir here, need to determine path on the fly using currently active 'python' command;
        # this is important for scipy installations for multiple Python versions (via multi_deps)
custom_paths = {
'files': [],
'dirs': [det_pylibdir()],
}
return super(EB_scipy, self).sanity_check_step(custom_paths=custom_paths)
|
hpcuantwerpen/easybuild-easyblocks
|
easybuild/easyblocks/s/scipy.py
|
Python
|
gpl-2.0
| 3,046
|
"""distutils.file_util
Utility functions for operating on single files.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: file_util.py,v 1.17 2004/11/10 22:23:14 loewis Exp $"
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = { None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking' }
def _copy_file_contents (src, dst, buffer_size=16*1024):
"""Copy the file 'src' to 'dst'; both must be filenames. Any error
opening either file, reading from 'src', or writing to 'dst', raises
DistutilsFileError. Data is read/written in chunks of 'buffer_size'
bytes (default 16k). No attempt is made to handle anything apart from
regular files.
"""
# Stolen from shutil module in the standard library, but with
# custom error-handling added.
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not open '%s': %s" % (src, errstr)
if os.path.exists(dst):
try:
os.unlink(dst)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not delete '%s': %s" % (dst, errstr)
try:
fdst = open(dst, 'wb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not create '%s': %s" % (dst, errstr)
while 1:
try:
buf = fsrc.read(buffer_size)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not read from '%s': %s" % (src, errstr)
if not buf:
break
try:
fdst.write(buf)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not write to '%s': %s" % (dst, errstr)
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
# _copy_file_contents()
def copy_file (src, dst,
preserve_mode=1,
preserve_times=1,
update=0,
link=None,
verbose=0,
dry_run=0):
"""Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
copied there with the same name; otherwise, it must be a filename. (If
the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
is true (the default), the file's mode (type and permission bits, or
whatever is analogous on the current platform) is copied. If
'preserve_times' is true (the default), the last-modified and
last-access times are copied as well. If 'update' is true, 'src' will
only be copied if 'dst' does not exist, or if 'dst' does exist but is
older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available.
Under Mac OS, uses the native file copy function in macostools; on
other systems, uses '_copy_file_contents()' to copy file contents.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
"""
# XXX if the destination file already exists, we clobber it if
# copying, but blow up if linking. Hmmm. And I don't know what
# macostools.copyfile() does. Should definitely be consistent, and
# should probably blow up if destination exists and we would be
# changing it (ie. it's not already a hard/soft link to src OR
# (not update) and (src newer than dst).
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError, \
"can't copy '%s': doesn't exist or not a regular file" % src
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
log.debug("not copying %s (output up-to-date)", src)
return dst, 0
try:
action = _copy_action[link]
except KeyError:
raise ValueError, \
"invalid value '%s' for 'link' argument" % link
if os.path.basename(dst) == os.path.basename(src):
log.info("%s %s -> %s", action, src, dir)
else:
log.info("%s %s -> %s", action, src, dst)
if dry_run:
return (dst, 1)
# On Mac OS, use the native file copy routine
if os.name == 'mac':
import macostools
try:
macostools.copy(src, dst, 0, preserve_times)
except os.error, exc:
raise DistutilsFileError, \
"could not copy '%s' to '%s': %s" % (src, dst, exc[-1])
# If linking (hard or symbolic), use the appropriate system call
# (Unix only, of course, but that's the caller's responsibility)
elif link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.link(src, dst)
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
# Otherwise (non-Mac, not linking), copy the file contents and
# (optionally) copy the times and mode.
else:
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
# According to David Ascher <da@ski.org>, utime() should be done
# before chmod() (at least under NT).
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode:
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (dst, 1)
# copy_file ()
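# Illustrative sketch (not part of the original distutils source): a typical call
# copies a source file into a build directory only when it is out of date, keeping
# its mode and timestamps; the paths below are hypothetical.
def _copy_file_demo(src='setup.py', build_dir='build'):
    # returns (dest_name, copied); 'copied' is 0 when 'update' skipped the copy
    return copy_file(src, build_dir, preserve_mode=1, preserve_times=1, update=1)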
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst,
verbose=0,
dry_run=0):
"""Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
be moved into it with the same name; otherwise, 'src' is just renamed
to 'dst'. Return the new full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
log.info("moving %s -> %s", src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError, \
"can't move '%s': not a regular file" % src
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError, \
"can't move '%s': destination '%s' already exists" % \
(src, dst)
if not isdir(dirname(dst)):
raise DistutilsFileError, \
"can't move '%s': destination '%s' not a valid path" % \
(src, dst)
copy_it = 0
try:
os.rename(src, dst)
except os.error, (num, msg):
if num == errno.EXDEV:
copy_it = 1
else:
raise DistutilsFileError, \
"couldn't move '%s' to '%s': %s" % (src, dst, msg)
if copy_it:
copy_file(src, dst)
try:
os.unlink(src)
except os.error, (num, msg):
try:
os.unlink(dst)
except os.error:
pass
raise DistutilsFileError, \
("couldn't move '%s' to '%s' by copy/delete: " +
"delete '%s' failed: %s") % \
(src, dst, src, msg)
return dst
# move_file ()
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, "w")
for line in contents:
f.write(line + "\n")
f.close()
|
trivoldus28/pulsarch-verilog
|
tools/local/bas-release/bas,3.9-SunOS-i386/lib/python/lib/python2.4/distutils/file_util.py
|
Python
|
gpl-2.0
| 8,320
|
"""Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u"\N{MATHEMATICAL LEFT ANGLE BRACKET}"
_rbracket_ucode = u"\N{MATHEMATICAL RIGHT ANGLE BRACKET}"
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u"\N{LIGHT VERTICAL BAR}"
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construct the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u'\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}', \
u'\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}', \
u'\N{BOX DRAWINGS LIGHT VERTICAL}'
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in {_lbracket, _lbracket_ucode}:
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in {_rbracket, _rbracket_ucode}:
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in {_straight_bracket, _straight_bracket_ucode}:
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product between this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
        ``def _eval_innerproduct_BraClass(self, bra, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
``def _apply_operator_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return [x.dual for x in dual_states]
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympy.physics.quantum.dagger import Dagger
return Dagger(self.dual._represent(**options))
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Ket and looking at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
    Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
    Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
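# Minimal sketch (illustration only, not part of sympy): following the dispatch
# pattern documented in KetBase, a custom ket names its inner-product hook after
# the bra class it knows how to pair with; DemoKet/DemoBra are hypothetical.
class DemoKet(Ket):
    @classmethod
    def dual_class(cls):
        return DemoBra
    def _eval_innerproduct_DemoBra(self, bra, **hints):
        # hypothetical orthonormality rule: <m|n> -> KroneckerDelta(m, n)
        from sympy import KroneckerDelta
        return KroneckerDelta(self.label[0], bra.label[0])
class DemoBra(Bra):
    @classmethod
    def dual_class(cls):
        return DemoKet
# e.g. (DemoBra('m')*DemoKet('n')).doit() should dispatch to the hook above.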
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympy.physics.quantum import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympy.physics.quantum import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
>>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
ct = 0
#If the passed value is outside the specified bounds, return 0
for v in var:
lower, upper = self.limits[v]
#Do the comparison to limits only if the passed symbol is actually
#a symbol present in the limits;
#Had problems with a comparison of x > L
if isinstance(args[ct], Expr) and \
not (lower in args[ct].free_symbols
or upper in args[ct].free_symbols):
continue
if (args[ct] < lower) == True or (args[ct] > upper) == True:
return 0
ct += 1
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
        Return the limits of the coordinates which the w.f. depends on. If no
        limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = symbols('x', real=True)
>>> L = symbols('L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
r"""
        Return the squared absolute magnitude of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
|
wxgeo/geophar
|
wxgeometrie/sympy/physics/quantum/state.py
|
Python
|
gpl-2.0
| 29,157
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.olsr', cpp_namespace='::ns3')
return root_module
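# Illustrative driver (assumption, not part of this generated binding file): such
# modules are normally turned into C++ by feeding the root module to a code sink;
# the remaining register_* passes of the complete file would be called here too.
def _generate_sketch(out_path='ns3_olsr_bindings.cc'):
    root_module = module_init()
    register_types(root_module)
    # register_methods(...) / register_functions(...) follow in the complete file
    with open(out_path, 'w') as fh:
        root_module.generate(FileCodeSink(fh))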
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator', u'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator*', u'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias(u'std::list< ns3::AttributeConstructionList::Item > const_iterator&', u'ns3::AttributeConstructionList::CIterator&')
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::NixVector'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Packet> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Packet'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector [class]
module.add_class('EventGarbageCollector', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
module.add_class('Inet6SocketAddress', import_from_module='ns.network')
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress [class]
root_module['ns3::Inet6SocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
module.add_class('InetSocketAddress', import_from_module='ns.network')
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress [class]
root_module['ns3::InetSocketAddress'].implicitly_converts_to(root_module['ns3::Address'])
## int-to-type.h (module 'core'): ns3::IntToType<0> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['0'])
## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<1> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['1'])
## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<2> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['2'])
## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<3> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['3'])
## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<4> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['4'])
## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<5> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['5'])
## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'], import_from_module='ns.core')
## int-to-type.h (module 'core'): ns3::IntToType<6> [struct]
module.add_class('IntToType', import_from_module='ns.core', template_parameters=['6'])
## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'], import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress [class]
module.add_class('Ipv4InterfaceAddress', import_from_module='ns.internet')
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e [enumeration]
module.add_enum('InterfaceAddressScope_e', ['HOST', 'LINK', 'GLOBAL'], outer_class=root_module['ns3::Ipv4InterfaceAddress'], import_from_module='ns.internet')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper [class]
module.add_class('Ipv4RoutingHelper', allow_subclassing=True, import_from_module='ns.internet')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )', u'ns3::Mac48Address::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )*', u'ns3::Mac48Address::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Mac48Address )&', u'ns3::Mac48Address::TracedCallback&')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
module.add_class('Mac8Address', import_from_module='ns.network')
## mac8-address.h (module 'network'): ns3::Mac8Address [class]
root_module['ns3::Mac8Address'].implicitly_converts_to(root_module['ns3::Address'])
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator', u'ns3::NodeContainer::Iterator')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator*', u'ns3::NodeContainer::Iterator*')
typehandlers.add_type_alias(u'std::vector< ns3::Ptr< ns3::Node > > const_iterator&', u'ns3::NodeContainer::Iterator&')
## non-copyable.h (module 'core'): ns3::NonCopyable [class]
module.add_class('NonCopyable', destructor_visibility='protected', import_from_module='ns.core')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## olsr-helper.h (module 'olsr'): ns3::OlsrHelper [class]
module.add_class('OlsrHelper', parent=root_module['ns3::Ipv4RoutingHelper'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::ItemType [enumeration]
module.add_enum('ItemType', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## simulator.h (module 'core'): ns3::Simulator [enumeration]
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs [class]
module.add_class('SystemWallClockMs', import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## timer.h (module 'core'): ns3::Timer [class]
module.add_class('Timer', import_from_module='ns.core')
## timer.h (module 'core'): ns3::Timer::DestroyPolicy [enumeration]
module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
## timer.h (module 'core'): ns3::Timer::State [enumeration]
module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'], import_from_module='ns.core')
## timer-impl.h (module 'core'): ns3::TimerImpl [class]
module.add_class('TimerImpl', allow_subclassing=True, import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias(u'uint32_t', u'ns3::TypeId::hash_t')
typehandlers.add_type_alias(u'uint32_t*', u'ns3::TypeId::hash_t*')
typehandlers.add_type_alias(u'uint32_t&', u'ns3::TypeId::hash_t&')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## ipv4-header.h (module 'internet'): ns3::Ipv4Header [class]
module.add_class('Ipv4Header', import_from_module='ns.internet', parent=root_module['ns3::Header'])
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType [enumeration]
module.add_enum('DscpType', ['DscpDefault', 'DSCP_CS1', 'DSCP_AF11', 'DSCP_AF12', 'DSCP_AF13', 'DSCP_CS2', 'DSCP_AF21', 'DSCP_AF22', 'DSCP_AF23', 'DSCP_CS3', 'DSCP_AF31', 'DSCP_AF32', 'DSCP_AF33', 'DSCP_CS4', 'DSCP_AF41', 'DSCP_AF42', 'DSCP_AF43', 'DSCP_CS5', 'DSCP_EF', 'DSCP_CS6', 'DSCP_CS7'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType [enumeration]
module.add_enum('EcnType', ['ECN_NotECT', 'ECN_ECT1', 'ECN_ECT0', 'ECN_CE'], outer_class=root_module['ns3::Ipv4Header'], import_from_module='ns.internet')
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4MulticastRoute', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4MulticastRoute>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Ipv4Route', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Ipv4Route>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## socket.h (module 'network'): ns3::Socket [class]
module.add_class('Socket', import_from_module='ns.network', parent=root_module['ns3::Object'])
## socket.h (module 'network'): ns3::Socket::SocketErrno [enumeration]
module.add_enum('SocketErrno', ['ERROR_NOTERROR', 'ERROR_ISCONN', 'ERROR_NOTCONN', 'ERROR_MSGSIZE', 'ERROR_AGAIN', 'ERROR_SHUTDOWN', 'ERROR_OPNOTSUPP', 'ERROR_AFNOSUPPORT', 'ERROR_INVAL', 'ERROR_BADF', 'ERROR_NOROUTETOHOST', 'ERROR_NODEV', 'ERROR_ADDRNOTAVAIL', 'ERROR_ADDRINUSE', 'SOCKET_ERRNO_LAST'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
## socket.h (module 'network'): ns3::Socket::SocketType [enumeration]
module.add_enum('SocketType', ['NS3_SOCK_STREAM', 'NS3_SOCK_SEQPACKET', 'NS3_SOCK_DGRAM', 'NS3_SOCK_RAW'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
## socket.h (module 'network'): ns3::Socket::SocketPriority [enumeration]
module.add_enum('SocketPriority', ['NS3_PRIO_BESTEFFORT', 'NS3_PRIO_FILLER', 'NS3_PRIO_BULK', 'NS3_PRIO_INTERACTIVE_BULK', 'NS3_PRIO_INTERACTIVE', 'NS3_PRIO_CONTROL'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
## socket.h (module 'network'): ns3::Socket::Ipv6MulticastFilterMode [enumeration]
module.add_enum('Ipv6MulticastFilterMode', ['INCLUDE', 'EXCLUDE'], outer_class=root_module['ns3::Socket'], import_from_module='ns.network')
## socket.h (module 'network'): ns3::SocketIpTosTag [class]
module.add_class('SocketIpTosTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## socket.h (module 'network'): ns3::SocketIpTtlTag [class]
module.add_class('SocketIpTtlTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag [class]
module.add_class('SocketIpv6HopLimitTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## socket.h (module 'network'): ns3::SocketIpv6TclassTag [class]
module.add_class('SocketIpv6TclassTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## socket.h (module 'network'): ns3::SocketPriorityTag [class]
module.add_class('SocketPriorityTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## socket.h (module 'network'): ns3::SocketSetDontFragmentTag [class]
module.add_class('SocketSetDontFragmentTag', import_from_module='ns.network', parent=root_module['ns3::Tag'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )', u'ns3::Time::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )*', u'ns3::Time::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time )&', u'ns3::Time::TracedCallback&')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## ipv4.h (module 'internet'): ns3::Ipv4 [class]
module.add_class('Ipv4', import_from_module='ns.internet', parent=root_module['ns3::Object'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute [class]
module.add_class('Ipv4MulticastRoute', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
## ipv4-route.h (module 'internet'): ns3::Ipv4Route [class]
module.add_class('Ipv4Route', import_from_module='ns.internet', parent=root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol [class]
module.add_class('Ipv4RoutingProtocol', import_from_module='ns.internet', parent=root_module['ns3::Object'])
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Ipv4RoutingProtocol::UnicastForwardCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Ipv4RoutingProtocol::UnicastForwardCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Ipv4RoutingProtocol::UnicastForwardCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Ipv4RoutingProtocol::MulticastForwardCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Ipv4RoutingProtocol::MulticastForwardCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Ipv4RoutingProtocol::MulticastForwardCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Ipv4RoutingProtocol::LocalDeliverCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Ipv4RoutingProtocol::LocalDeliverCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Ipv4RoutingProtocol::LocalDeliverCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Ipv4RoutingProtocol::ErrorCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Ipv4RoutingProtocol::ErrorCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Ipv4RoutingProtocol::ErrorCallback&')
## ipv4-static-routing.h (module 'internet'): ns3::Ipv4StaticRouting [class]
module.add_class('Ipv4StaticRouting', import_from_module='ns.internet', parent=root_module['ns3::Ipv4RoutingProtocol'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
typehandlers.add_type_alias(u'void ( * ) ( )', u'ns3::NetDevice::LinkChangeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( )*', u'ns3::NetDevice::LinkChangeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( )&', u'ns3::NetDevice::LinkChangeTracedCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::ReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::ReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::ReceiveCallback&')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::NetDevice::PromiscReceiveCallback')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::NetDevice::PromiscReceiveCallback*')
typehandlers.add_type_alias(u'ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::NetDevice::PromiscReceiveCallback&')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::ProtocolHandler')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::ProtocolHandler*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::ProtocolHandler&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::Node::DeviceAdditionListener')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::Node::DeviceAdditionListener*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::Node::DeviceAdditionListener&')
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )', u'ns3::Packet::TracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )*', u'ns3::Packet::TracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > )&', u'ns3::Packet::TracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )', u'ns3::Packet::AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )*', u'ns3::Packet::AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Address const & )&', u'ns3::Packet::AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )', u'ns3::Packet::TwoAddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )*', u'ns3::Packet::TwoAddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const > const, ns3::Address const &, ns3::Address const & )&', u'ns3::Packet::TwoAddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )', u'ns3::Packet::Mac48AddressTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )*', u'ns3::Packet::Mac48AddressTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, ns3::Mac48Address )&', u'ns3::Packet::Mac48AddressTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )', u'ns3::Packet::SizeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )*', u'ns3::Packet::SizeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t )&', u'ns3::Packet::SizeTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )', u'ns3::Packet::SinrTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )*', u'ns3::Packet::SinrTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Ptr< ns3::Packet const >, double )&', u'ns3::Packet::SinrTracedCallback&')
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## callback.h (module 'core'): ns3::CallbackImpl<bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['bool', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, const ns3::olsr::PacketHeader &, const std::vector<ns3::olsr::MessageHeader, std::allocator<ns3::olsr::MessageHeader> > &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'const ns3::olsr::PacketHeader &', 'const std::vector<ns3::olsr::MessageHeader, std::allocator<ns3::olsr::MessageHeader> > &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<const ns3::Packet>', 'unsigned short', 'const ns3::Address &', 'const ns3::Address &', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'const ns3::Address &', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', template_parameters=['void', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'], parent=root_module['ns3::CallbackImplBase'])
## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting [class]
module.add_class('Ipv4ListRouting', import_from_module='ns.internet', parent=root_module['ns3::Ipv4RoutingProtocol'])
module.add_container('std::vector< ns3::Ipv6Address >', 'ns3::Ipv6Address', container_type=u'vector')
module.add_container('std::map< unsigned int, unsigned int >', ('unsigned int', 'unsigned int'), container_type=u'map')
module.add_container('std::vector< unsigned int >', 'unsigned int', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::MessageHeader >', 'ns3::olsr::MessageHeader', container_type=u'vector')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
## Register a nested module for the namespace olsr
nested_module = module.add_cpp_namespace('olsr')
register_types_ns3_olsr(nested_module)
## Register a nested module for the namespace tests
nested_module = module.add_cpp_namespace('tests')
register_types_ns3_tests(nested_module)
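
## The helpers below mirror the nested-namespace registrations above: each one
## receives the sub-module created by add_cpp_namespace() and registers the
## classes, enumerations and type aliases that live in that C++ namespace.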
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()

def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )*', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, std::size_t const )&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )*', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, std::size_t const )&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)

def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])

def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time )', u'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time )*', u'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time )&', u'ns3::TracedValueCallback::Time&')

def register_types_ns3_olsr(module):
root_module = module.get_root()
## olsr-repositories.h (module 'olsr'): ns3::olsr::Association [struct]
module.add_class('Association')
## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple [struct]
module.add_class('AssociationTuple')
## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple [struct]
module.add_class('DuplicateTuple')
## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple [struct]
module.add_class('IfaceAssocTuple')
## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple [struct]
module.add_class('LinkTuple')
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader [class]
module.add_class('MessageHeader', parent=root_module['ns3::Header'])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::MessageType [enumeration]
module.add_enum('MessageType', ['HELLO_MESSAGE', 'TC_MESSAGE', 'MID_MESSAGE', 'HNA_MESSAGE'], outer_class=root_module['ns3::olsr::MessageHeader'])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello [struct]
module.add_class('Hello', outer_class=root_module['ns3::olsr::MessageHeader'])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::LinkMessage [struct]
module.add_class('LinkMessage', outer_class=root_module['ns3::olsr::MessageHeader::Hello'])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna [struct]
module.add_class('Hna', outer_class=root_module['ns3::olsr::MessageHeader'])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Association [struct]
module.add_class('Association', outer_class=root_module['ns3::olsr::MessageHeader::Hna'])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid [struct]
module.add_class('Mid', outer_class=root_module['ns3::olsr::MessageHeader'])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc [struct]
module.add_class('Tc', outer_class=root_module['ns3::olsr::MessageHeader'])
## olsr-repositories.h (module 'olsr'): ns3::olsr::MprSelectorTuple [struct]
module.add_class('MprSelectorTuple')
## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple [struct]
module.add_class('NeighborTuple')
## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::Status [enumeration]
module.add_enum('Status', ['STATUS_NOT_SYM', 'STATUS_SYM'], outer_class=root_module['ns3::olsr::NeighborTuple'])
## olsr-state.h (module 'olsr'): ns3::olsr::OlsrState [class]
module.add_class('OlsrState')
## olsr-header.h (module 'olsr'): ns3::olsr::PacketHeader [class]
module.add_class('PacketHeader', parent=root_module['ns3::Header'])
## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingProtocol [class]
module.add_class('RoutingProtocol', parent=root_module['ns3::Ipv4RoutingProtocol'])
typehandlers.add_type_alias(u'void ( * ) ( ns3::olsr::PacketHeader const &, ns3::olsr::MessageList const & )', u'ns3::olsr::RoutingProtocol::PacketTxRxTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( ns3::olsr::PacketHeader const &, ns3::olsr::MessageList const & )*', u'ns3::olsr::RoutingProtocol::PacketTxRxTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::olsr::PacketHeader const &, ns3::olsr::MessageList const & )&', u'ns3::olsr::RoutingProtocol::PacketTxRxTracedCallback&')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t )', u'ns3::olsr::RoutingProtocol::TableChangeTracedCallback')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t )*', u'ns3::olsr::RoutingProtocol::TableChangeTracedCallback*')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t )&', u'ns3::olsr::RoutingProtocol::TableChangeTracedCallback&')
## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry [struct]
module.add_class('RoutingTableEntry')
## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple [struct]
module.add_class('TopologyTuple')
## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple [struct]
module.add_class('TwoHopNeighborTuple')
module.add_container('std::vector< ns3::Ipv4Address >', 'ns3::Ipv4Address', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::MessageHeader::Hello::LinkMessage >', 'ns3::olsr::MessageHeader::Hello::LinkMessage', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::MessageHeader::Hna::Association >', 'ns3::olsr::MessageHeader::Hna::Association', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::MprSelectorTuple >', 'ns3::olsr::MprSelectorTuple', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::NeighborTuple >', 'ns3::olsr::NeighborTuple', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::TwoHopNeighborTuple >', 'ns3::olsr::TwoHopNeighborTuple', container_type=u'vector')
module.add_container('ns3::olsr::MprSet', 'ns3::Ipv4Address', container_type=u'set')
module.add_container('std::vector< ns3::olsr::LinkTuple >', 'ns3::olsr::LinkTuple', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::TopologyTuple >', 'ns3::olsr::TopologyTuple', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::IfaceAssocTuple >', 'ns3::olsr::IfaceAssocTuple', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::AssociationTuple >', 'ns3::olsr::AssociationTuple', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::Association >', 'ns3::olsr::Association', container_type=u'vector')
module.add_container('std::vector< ns3::olsr::RoutingTableEntry >', 'ns3::olsr::RoutingTableEntry', container_type=u'vector')
module.add_container('std::set< unsigned int >', 'unsigned int', container_type=u'set')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::MessageHeader >', u'ns3::olsr::MessageList')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::MessageHeader >*', u'ns3::olsr::MessageList*')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::MessageHeader >&', u'ns3::olsr::MessageList&')
typehandlers.add_type_alias(u'std::set< ns3::Ipv4Address >', u'ns3::olsr::MprSet')
typehandlers.add_type_alias(u'std::set< ns3::Ipv4Address >*', u'ns3::olsr::MprSet*')
typehandlers.add_type_alias(u'std::set< ns3::Ipv4Address >&', u'ns3::olsr::MprSet&')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::MprSelectorTuple >', u'ns3::olsr::MprSelectorSet')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::MprSelectorTuple >*', u'ns3::olsr::MprSelectorSet*')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::MprSelectorTuple >&', u'ns3::olsr::MprSelectorSet&')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::LinkTuple >', u'ns3::olsr::LinkSet')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::LinkTuple >*', u'ns3::olsr::LinkSet*')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::LinkTuple >&', u'ns3::olsr::LinkSet&')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::NeighborTuple >', u'ns3::olsr::NeighborSet')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::NeighborTuple >*', u'ns3::olsr::NeighborSet*')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::NeighborTuple >&', u'ns3::olsr::NeighborSet&')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::TwoHopNeighborTuple >', u'ns3::olsr::TwoHopNeighborSet')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::TwoHopNeighborTuple >*', u'ns3::olsr::TwoHopNeighborSet*')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::TwoHopNeighborTuple >&', u'ns3::olsr::TwoHopNeighborSet&')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::TopologyTuple >', u'ns3::olsr::TopologySet')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::TopologyTuple >*', u'ns3::olsr::TopologySet*')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::TopologyTuple >&', u'ns3::olsr::TopologySet&')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::DuplicateTuple >', u'ns3::olsr::DuplicateSet')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::DuplicateTuple >*', u'ns3::olsr::DuplicateSet*')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::DuplicateTuple >&', u'ns3::olsr::DuplicateSet&')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::IfaceAssocTuple >', u'ns3::olsr::IfaceAssocSet')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::IfaceAssocTuple >*', u'ns3::olsr::IfaceAssocSet*')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::IfaceAssocTuple >&', u'ns3::olsr::IfaceAssocSet&')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::AssociationTuple >', u'ns3::olsr::AssociationSet')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::AssociationTuple >*', u'ns3::olsr::AssociationSet*')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::AssociationTuple >&', u'ns3::olsr::AssociationSet&')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::Association >', u'ns3::olsr::Associations')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::Association >*', u'ns3::olsr::Associations*')
typehandlers.add_type_alias(u'std::vector< ns3::olsr::Association >&', u'ns3::olsr::Associations&')
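
## Illustrative sketch (not part of the generated scan): once this module is
## built, the OLSR bindings registered above are typically driven from Python
## roughly as follows; node/device setup is elided, and the InternetStackHelper
## name is assumed to come from the 'internet' bindings, not from this file.
##
##   import ns.core
##   import ns.network
##   import ns.internet
##   import ns.olsr
##
##   nodes = ns.network.NodeContainer()
##   nodes.Create(2)
##   olsr = ns.olsr.OlsrHelper()                  # wraps ns3::OlsrHelper (registered above)
##   stack = ns.internet.InternetStackHelper()    # assumed helper from the internet module
##   stack.SetRoutingHelper(olsr)
##   stack.Install(nodes)
##   ns.core.Simulator.Run()
##   ns.core.Simulator.Destroy()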
def register_types_ns3_tests(module):
root_module = module.get_root()
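
## After all types are known, register_methods() walks the same classes again
## and attaches their constructors, methods and attributes; each
## register_Ns3*_methods() helper invoked below handles one wrapped C++ class.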
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, root_module['ns3::DefaultDeleter< ns3::NixVector >'])
register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Packet >'])
register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
register_Ns3EventGarbageCollector_methods(root_module, root_module['ns3::EventGarbageCollector'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Inet6SocketAddress_methods(root_module, root_module['ns3::Inet6SocketAddress'])
register_Ns3InetSocketAddress_methods(root_module, root_module['ns3::InetSocketAddress'])
register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4InterfaceAddress_methods(root_module, root_module['ns3::Ipv4InterfaceAddress'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv4RoutingHelper_methods(root_module, root_module['ns3::Ipv4RoutingHelper'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3Mac8Address_methods(root_module, root_module['ns3::Mac8Address'])
register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
register_Ns3NonCopyable_methods(root_module, root_module['ns3::NonCopyable'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3OlsrHelper_methods(root_module, root_module['ns3::OlsrHelper'])
register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3SystemWallClockMs_methods(root_module, root_module['ns3::SystemWallClockMs'])
register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
register_Ns3Header_methods(root_module, root_module['ns3::Header'])
register_Ns3Ipv4Header_methods(root_module, root_module['ns3::Ipv4Header'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Socket_methods(root_module, root_module['ns3::Socket'])
register_Ns3SocketIpTosTag_methods(root_module, root_module['ns3::SocketIpTosTag'])
register_Ns3SocketIpTtlTag_methods(root_module, root_module['ns3::SocketIpTtlTag'])
register_Ns3SocketIpv6HopLimitTag_methods(root_module, root_module['ns3::SocketIpv6HopLimitTag'])
register_Ns3SocketIpv6TclassTag_methods(root_module, root_module['ns3::SocketIpv6TclassTag'])
register_Ns3SocketPriorityTag_methods(root_module, root_module['ns3::SocketPriorityTag'])
register_Ns3SocketSetDontFragmentTag_methods(root_module, root_module['ns3::SocketSetDontFragmentTag'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
register_Ns3Ipv4_methods(root_module, root_module['ns3::Ipv4'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv4MulticastRoute_methods(root_module, root_module['ns3::Ipv4MulticastRoute'])
register_Ns3Ipv4Route_methods(root_module, root_module['ns3::Ipv4Route'])
register_Ns3Ipv4RoutingProtocol_methods(root_module, root_module['ns3::Ipv4RoutingProtocol'])
register_Ns3Ipv4StaticRouting_methods(root_module, root_module['ns3::Ipv4StaticRouting'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
register_Ns3Node_methods(root_module, root_module['ns3::Node'])
register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3CallbackImpl__Bool_Ns3Ptr__lt__ns3Socket__gt___Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Const_ns3OlsrPacketHeader___amp___Const_stdVector__lt__ns3OlsrMessageHeader__stdAllocator__lt__ns3OlsrMessageHeader__gt_____gt_____amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, const ns3::olsr::PacketHeader &, const std::vector<ns3::olsr::MessageHeader, std::allocator<ns3::olsr::MessageHeader> > &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3CallbackImpl__Void_Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
register_Ns3Ipv4ListRouting_methods(root_module, root_module['ns3::Ipv4ListRouting'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
register_Ns3OlsrAssociation_methods(root_module, root_module['ns3::olsr::Association'])
register_Ns3OlsrAssociationTuple_methods(root_module, root_module['ns3::olsr::AssociationTuple'])
register_Ns3OlsrDuplicateTuple_methods(root_module, root_module['ns3::olsr::DuplicateTuple'])
register_Ns3OlsrIfaceAssocTuple_methods(root_module, root_module['ns3::olsr::IfaceAssocTuple'])
register_Ns3OlsrLinkTuple_methods(root_module, root_module['ns3::olsr::LinkTuple'])
register_Ns3OlsrMessageHeader_methods(root_module, root_module['ns3::olsr::MessageHeader'])
register_Ns3OlsrMessageHeaderHello_methods(root_module, root_module['ns3::olsr::MessageHeader::Hello'])
register_Ns3OlsrMessageHeaderHelloLinkMessage_methods(root_module, root_module['ns3::olsr::MessageHeader::Hello::LinkMessage'])
register_Ns3OlsrMessageHeaderHna_methods(root_module, root_module['ns3::olsr::MessageHeader::Hna'])
register_Ns3OlsrMessageHeaderHnaAssociation_methods(root_module, root_module['ns3::olsr::MessageHeader::Hna::Association'])
register_Ns3OlsrMessageHeaderMid_methods(root_module, root_module['ns3::olsr::MessageHeader::Mid'])
register_Ns3OlsrMessageHeaderTc_methods(root_module, root_module['ns3::olsr::MessageHeader::Tc'])
register_Ns3OlsrMprSelectorTuple_methods(root_module, root_module['ns3::olsr::MprSelectorTuple'])
register_Ns3OlsrNeighborTuple_methods(root_module, root_module['ns3::olsr::NeighborTuple'])
register_Ns3OlsrOlsrState_methods(root_module, root_module['ns3::olsr::OlsrState'])
register_Ns3OlsrPacketHeader_methods(root_module, root_module['ns3::olsr::PacketHeader'])
register_Ns3OlsrRoutingProtocol_methods(root_module, root_module['ns3::olsr::RoutingProtocol'])
register_Ns3OlsrRoutingTableEntry_methods(root_module, root_module['ns3::olsr::RoutingTableEntry'])
register_Ns3OlsrTopologyTuple_methods(root_module, root_module['ns3::olsr::TopologyTuple'])
register_Ns3OlsrTwoHopNeighborTuple_methods(root_module, root_module['ns3::olsr::TwoHopNeighborTuple'])
return
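
# Per-class registration helpers. Each one attaches constructors, comparison and
# output-stream operators, member functions and instance attributes to the wrapper
# through the PyBindGen-style cls.add_constructor / cls.add_method /
# cls.add_instance_attribute calls; the '##' comments record the originating C++
# header and the wrapped signature.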
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'ns3::AttributeConstructionList::CIterator',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3Buffer_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [constructor]
cls.add_constructor([param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize')])
## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
cls.add_method('AddAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Buffer const &', 'o')])
## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
cls.add_method('AddAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
cls.add_method('Begin',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Buffer',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
cls.add_method('End',
'ns3::Buffer::Iterator',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
cls.add_method('PeekData',
'uint8_t const *',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3BufferIterator_methods(root_module, cls):
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
cls.add_constructor([])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
cls.add_method('CalculateIpChecksum',
'uint16_t',
[param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
cls.add_method('GetDistanceFrom',
'uint32_t',
[param('ns3::Buffer::Iterator const &', 'o')],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetRemainingSize() const [member function]
cls.add_method('GetRemainingSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
cls.add_method('IsEnd',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
cls.add_method('IsStart',
'bool',
[],
is_const=True)
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
cls.add_method('Next',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
cls.add_method('Next',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
cls.add_method('PeekU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
cls.add_method('Prev',
'void',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
cls.add_method('Prev',
'void',
[param('uint32_t', 'delta')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
cls.add_method('ReadLsbtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
cls.add_method('ReadLsbtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
cls.add_method('ReadLsbtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
cls.add_method('ReadNtohU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
cls.add_method('ReadNtohU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
cls.add_method('ReadNtohU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Write',
'void',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
cls.add_method('WriteHtolsbU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
cls.add_method('WriteHtolsbU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
cls.add_method('WriteHtolsbU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
cls.add_method('WriteHtonU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
cls.add_method('WriteHtonU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
cls.add_method('WriteHtonU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data')])
## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'data'), param('uint32_t', 'len')])
return
def register_Ns3ByteTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagIterator::Item',
[])
return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
cls.add_method('GetEnd',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
cls.add_method('GetStart',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3ByteTagList_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
cls.add_constructor([])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [constructor]
cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
cls.add_method('Add',
'ns3::TagBuffer',
[param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
cls.add_method('Add',
'void',
[param('ns3::ByteTagList const &', 'o')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
cls.add_method('AddAtEnd',
'void',
[param('int32_t', 'appendOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
cls.add_method('AddAtStart',
'void',
[param('int32_t', 'prependOffset')])
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
cls.add_method('Adjust',
'void',
[param('int32_t', 'adjustment')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
cls.add_method('Begin',
'ns3::ByteTagList::Iterator',
[param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
is_const=True)
## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
cls.add_method('GetOffsetStart',
'uint32_t',
[],
is_const=True)
## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
cls.add_method('Next',
'ns3::ByteTagList::Iterator::Item',
[])
return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
cls.add_constructor([param('ns3::TagBuffer', 'buf')])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
cls.add_instance_attribute('end', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
cls.add_instance_attribute('start', 'int32_t', is_const=False)
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
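
# The following blocks wrap the DefaultDeleter<T> template instantiations used by
# the SimpleRefCount bases; each exposes only its constructors and the static
# Delete(T*) helper.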
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeAccessor>::Delete(ns3::AttributeAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeAccessor *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeChecker> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeChecker > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeChecker>::Delete(ns3::AttributeChecker * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeChecker *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeValue> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeValue > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeValue>::Delete(ns3::AttributeValue * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::AttributeValue *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter(ns3::DefaultDeleter<ns3::CallbackImplBase> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::CallbackImplBase > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::CallbackImplBase>::Delete(ns3::CallbackImplBase * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::CallbackImplBase *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter(ns3::DefaultDeleter<ns3::EventImpl> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::EventImpl > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::EventImpl>::Delete(ns3::EventImpl * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::EventImpl *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter(ns3::DefaultDeleter<ns3::Hash::Implementation> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::Hash::Implementation > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Hash::Implementation>::Delete(ns3::Hash::Implementation * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Hash::Implementation *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3NixVector_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::NixVector>::DefaultDeleter(ns3::DefaultDeleter<ns3::NixVector> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::NixVector > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::NixVector>::Delete(ns3::NixVector * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::NixVector *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3Packet_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Packet>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Packet>::DefaultDeleter(ns3::DefaultDeleter<ns3::Packet> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::Packet > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Packet>::Delete(ns3::Packet * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Packet *', 'object')],
is_static=True)
return
def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter() [constructor]
cls.add_constructor([])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::TraceSourceAccessor> const & arg0) [constructor]
cls.add_constructor([param('ns3::DefaultDeleter< ns3::TraceSourceAccessor > const &', 'arg0')])
## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::TraceSourceAccessor>::Delete(ns3::TraceSourceAccessor * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::TraceSourceAccessor *', 'object')],
is_static=True)
return
def register_Ns3EventGarbageCollector_methods(root_module, cls):
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector::EventGarbageCollector() [constructor]
cls.add_constructor([])
## event-garbage-collector.h (module 'core'): void ns3::EventGarbageCollector::Track(ns3::EventId event) [member function]
cls.add_method('Track',
'void',
[param('ns3::EventId', 'event')])
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector::EventGarbageCollector(ns3::EventGarbageCollector const & arg0) [constructor]
cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')])
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
def register_Ns3Inet6SocketAddress_methods(root_module, cls):
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Inet6SocketAddress const & arg0) [constructor]
cls.add_constructor([param('ns3::Inet6SocketAddress const &', 'arg0')])
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6, uint16_t port) [constructor]
cls.add_constructor([param('ns3::Ipv6Address', 'ipv6'), param('uint16_t', 'port')])
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(ns3::Ipv6Address ipv6) [constructor]
cls.add_constructor([param('ns3::Ipv6Address', 'ipv6')])
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(uint16_t port) [constructor]
cls.add_constructor([param('uint16_t', 'port')])
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6, uint16_t port) [constructor]
cls.add_constructor([param('char const *', 'ipv6'), param('uint16_t', 'port')])
## inet6-socket-address.h (module 'network'): ns3::Inet6SocketAddress::Inet6SocketAddress(char const * ipv6) [constructor]
cls.add_constructor([param('char const *', 'ipv6')])
## inet6-socket-address.h (module 'network'): static ns3::Inet6SocketAddress ns3::Inet6SocketAddress::ConvertFrom(ns3::Address const & addr) [member function]
cls.add_method('ConvertFrom',
'ns3::Inet6SocketAddress',
[param('ns3::Address const &', 'addr')],
is_static=True)
## inet6-socket-address.h (module 'network'): ns3::Ipv6Address ns3::Inet6SocketAddress::GetIpv6() const [member function]
cls.add_method('GetIpv6',
'ns3::Ipv6Address',
[],
is_const=True)
## inet6-socket-address.h (module 'network'): uint16_t ns3::Inet6SocketAddress::GetPort() const [member function]
cls.add_method('GetPort',
'uint16_t',
[],
is_const=True)
## inet6-socket-address.h (module 'network'): static bool ns3::Inet6SocketAddress::IsMatchingType(ns3::Address const & addr) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'addr')],
is_static=True)
## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetIpv6(ns3::Ipv6Address ipv6) [member function]
cls.add_method('SetIpv6',
'void',
[param('ns3::Ipv6Address', 'ipv6')])
## inet6-socket-address.h (module 'network'): void ns3::Inet6SocketAddress::SetPort(uint16_t port) [member function]
cls.add_method('SetPort',
'void',
[param('uint16_t', 'port')])
return
def register_Ns3InetSocketAddress_methods(root_module, cls):
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::InetSocketAddress const & arg0) [constructor]
cls.add_constructor([param('ns3::InetSocketAddress const &', 'arg0')])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4, uint16_t port) [constructor]
cls.add_constructor([param('ns3::Ipv4Address', 'ipv4'), param('uint16_t', 'port')])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(ns3::Ipv4Address ipv4) [constructor]
cls.add_constructor([param('ns3::Ipv4Address', 'ipv4')])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(uint16_t port) [constructor]
cls.add_constructor([param('uint16_t', 'port')])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4, uint16_t port) [constructor]
cls.add_constructor([param('char const *', 'ipv4'), param('uint16_t', 'port')])
## inet-socket-address.h (module 'network'): ns3::InetSocketAddress::InetSocketAddress(char const * ipv4) [constructor]
cls.add_constructor([param('char const *', 'ipv4')])
## inet-socket-address.h (module 'network'): static ns3::InetSocketAddress ns3::InetSocketAddress::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::InetSocketAddress',
[param('ns3::Address const &', 'address')],
is_static=True)
## inet-socket-address.h (module 'network'): ns3::Ipv4Address ns3::InetSocketAddress::GetIpv4() const [member function]
cls.add_method('GetIpv4',
'ns3::Ipv4Address',
[],
is_const=True)
## inet-socket-address.h (module 'network'): uint16_t ns3::InetSocketAddress::GetPort() const [member function]
cls.add_method('GetPort',
'uint16_t',
[],
is_const=True)
## inet-socket-address.h (module 'network'): uint8_t ns3::InetSocketAddress::GetTos() const [member function]
cls.add_method('GetTos',
'uint8_t',
[],
is_const=True)
## inet-socket-address.h (module 'network'): static bool ns3::InetSocketAddress::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetIpv4(ns3::Ipv4Address address) [member function]
cls.add_method('SetIpv4',
'void',
[param('ns3::Ipv4Address', 'address')])
## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetPort(uint16_t port) [member function]
cls.add_method('SetPort',
'void',
[param('uint16_t', 'port')])
## inet-socket-address.h (module 'network'): void ns3::InetSocketAddress::SetTos(uint8_t tos) [member function]
cls.add_method('SetTos',
'void',
[param('uint8_t', 'tos')])
return
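
# IntToType<0> through IntToType<6> are empty tag types; only their default and
# copy constructors are registered.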
def register_Ns3IntToType__0_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType(ns3::IntToType<0> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')])
return
def register_Ns3IntToType__1_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType(ns3::IntToType<1> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')])
return
def register_Ns3IntToType__2_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType(ns3::IntToType<2> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')])
return
def register_Ns3IntToType__3_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType(ns3::IntToType<3> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')])
return
def register_Ns3IntToType__4_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType(ns3::IntToType<4> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')])
return
def register_Ns3IntToType__5_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType(ns3::IntToType<5> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')])
return
def register_Ns3IntToType__6_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType(ns3::IntToType<6> const & arg0) [constructor]
cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')])
return
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
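# Editor's sketch (not generated code): exercising the Ipv4Address methods registered
# above together with Ipv4Mask (registered further below in this file). Assumes the
# bindings are importable as ``ns.network``.
def _example_ipv4_address():
    import ns.network
    addr = ns.network.Ipv4Address("10.1.1.7")
    mask = ns.network.Ipv4Mask("255.255.255.0")
    network = addr.CombineMask(mask)                    # 10.1.1.0
    broadcast = addr.GetSubnetDirectedBroadcast(mask)   # 10.1.1.255
    return network, broadcast, addr.IsMulticast()       # IsMulticast() -> False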
def register_Ns3Ipv4InterfaceAddress_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress() [constructor]
cls.add_constructor([])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4Address local, ns3::Ipv4Mask mask) [constructor]
cls.add_constructor([param('ns3::Ipv4Address', 'local'), param('ns3::Ipv4Mask', 'mask')])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::Ipv4InterfaceAddress(ns3::Ipv4InterfaceAddress const & o) [constructor]
cls.add_constructor([param('ns3::Ipv4InterfaceAddress const &', 'o')])
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4InterfaceAddress::GetLocal() const [member function]
cls.add_method('GetLocal',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4Mask ns3::Ipv4InterfaceAddress::GetMask() const [member function]
cls.add_method('GetMask',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-interface-address.h (module 'internet'): ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e ns3::Ipv4InterfaceAddress::GetScope() const [member function]
cls.add_method('GetScope',
'ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e',
[],
is_const=True)
## ipv4-interface-address.h (module 'internet'): bool ns3::Ipv4InterfaceAddress::IsSecondary() const [member function]
cls.add_method('IsSecondary',
'bool',
[],
is_const=True)
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetBroadcast(ns3::Ipv4Address broadcast) [member function]
cls.add_method('SetBroadcast',
'void',
[param('ns3::Ipv4Address', 'broadcast')])
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetLocal(ns3::Ipv4Address local) [member function]
cls.add_method('SetLocal',
'void',
[param('ns3::Ipv4Address', 'local')])
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetMask(ns3::Ipv4Mask mask) [member function]
cls.add_method('SetMask',
'void',
[param('ns3::Ipv4Mask', 'mask')])
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetPrimary() [member function]
cls.add_method('SetPrimary',
'void',
[])
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetScope(ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function]
cls.add_method('SetScope',
'void',
[param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')])
## ipv4-interface-address.h (module 'internet'): void ns3::Ipv4InterfaceAddress::SetSecondary() [member function]
cls.add_method('SetSecondary',
'void',
[])
return
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
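# Editor's sketch (not generated code): the Ipv4Mask prefix-length and matching helpers
# registered above. Assumes ``ns.network`` is importable.
def _example_ipv4_mask():
    import ns.network
    mask = ns.network.Ipv4Mask("/24")
    a = ns.network.Ipv4Address("192.168.1.10")
    b = ns.network.Ipv4Address("192.168.1.200")
    return mask.GetPrefixLength(), mask.IsMatch(a, b)   # (24, True)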
def register_Ns3Ipv4RoutingHelper_methods(root_module, cls):
## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper() [constructor]
cls.add_constructor([])
## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper::Ipv4RoutingHelper(ns3::Ipv4RoutingHelper const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4RoutingHelper const &', 'arg0')])
## ipv4-routing-helper.h (module 'internet'): ns3::Ipv4RoutingHelper * ns3::Ipv4RoutingHelper::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ipv4RoutingHelper *',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4-routing-helper.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4RoutingHelper::Create(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheAllAt(ns3::Time printTime, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
cls.add_method('PrintNeighborCacheAllAt',
'void',
[param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
is_static=True)
## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheAllEvery(ns3::Time printInterval, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
cls.add_method('PrintNeighborCacheAllEvery',
'void',
[param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
is_static=True)
## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheAt(ns3::Time printTime, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
cls.add_method('PrintNeighborCacheAt',
'void',
[param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
is_static=True)
## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintNeighborCacheEvery(ns3::Time printInterval, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
cls.add_method('PrintNeighborCacheEvery',
'void',
[param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')],
is_static=True)
## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintRoutingTableAllAt(ns3::Time printTime, ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::Unit::S) [member function]
cls.add_method('PrintRoutingTableAllAt',
'void',
[param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')],
is_static=True)
## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintRoutingTableAllEvery(ns3::Time printInterval, ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::Unit::S) [member function]
cls.add_method('PrintRoutingTableAllEvery',
'void',
[param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')],
is_static=True)
## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintRoutingTableAt(ns3::Time printTime, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::Unit::S) [member function]
cls.add_method('PrintRoutingTableAt',
'void',
[param('ns3::Time', 'printTime'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')],
is_static=True)
## ipv4-routing-helper.h (module 'internet'): static void ns3::Ipv4RoutingHelper::PrintRoutingTableEvery(ns3::Time printInterval, ns3::Ptr<ns3::Node> node, ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::Unit::S) [member function]
cls.add_method('PrintRoutingTableEvery',
'void',
[param('ns3::Time', 'printInterval'), param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')],
is_static=True)
return
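# Editor's sketch (not generated code): a one-shot dump of every node's IPv4 routing
# table using the static helper registered above. ``stream`` is assumed to be an
# ns3::OutputStreamWrapper created elsewhere (its bindings live in another part of the
# module set), and ns.core.Seconds is the usual Time factory from the core bindings.
def _example_print_routing_tables(stream):
    import ns.core
    import ns.internet
    # The 'unit' argument defaults to seconds, as in the registration above.
    ns.internet.Ipv4RoutingHelper.PrintRoutingTableAllAt(ns.core.Seconds(5.0), stream)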
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
deprecated=True, is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac8Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac8Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac8Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
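# Editor's sketch (not generated code): deriving an autoconfigured link-local IPv6
# address from a MAC address, using only methods registered in this file (Mac48Address
# is registered later below). Assumes ``ns.network`` is importable.
def _example_ipv6_autoconf():
    import ns.network
    mac = ns.network.Mac48Address("00:00:00:00:00:01")
    lladdr = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
    return lladdr, lladdr.IsLinkLocal()   # typically (fe80::200:ff:fe00:1, True)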
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<')
cls.add_output_stream_operator()
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
def register_Ns3Mac8Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(ns3::Mac8Address const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac8Address const &', 'arg0')])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address() [constructor]
cls.add_constructor([])
## mac8-address.h (module 'network'): ns3::Mac8Address::Mac8Address(uint8_t addr) [constructor]
cls.add_constructor([param('uint8_t', 'addr')])
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac8Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyFrom(uint8_t const * pBuffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'pBuffer')])
## mac8-address.h (module 'network'): void ns3::Mac8Address::CopyTo(uint8_t * pBuffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'pBuffer')],
is_const=True)
## mac8-address.h (module 'network'): static ns3::Mac8Address ns3::Mac8Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac8Address',
[],
is_static=True)
## mac8-address.h (module 'network'): static bool ns3::Mac8Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
def register_Ns3NodeContainer_methods(root_module, cls):
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
cls.add_constructor([])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
cls.add_constructor([param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NodeContainer', 'other')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')])
## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'nodeName')])
## node-container.h (module 'network'): ns3::NodeContainer::Iterator ns3::NodeContainer::Begin() const [member function]
cls.add_method('Begin',
'ns3::NodeContainer::Iterator',
[],
is_const=True)
## node-container.h (module 'network'): bool ns3::NodeContainer::Contains(uint32_t id) const [member function]
cls.add_method('Contains',
'bool',
[param('uint32_t', 'id')],
is_const=True)
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n')])
## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
cls.add_method('Create',
'void',
[param('uint32_t', 'n'), param('uint32_t', 'systemId')])
## node-container.h (module 'network'): ns3::NodeContainer::Iterator ns3::NodeContainer::End() const [member function]
cls.add_method('End',
'ns3::NodeContainer::Iterator',
[],
is_const=True)
## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Node >',
[param('uint32_t', 'i')],
is_const=True)
## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
cls.add_method('GetGlobal',
'ns3::NodeContainer',
[],
is_static=True)
## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
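# Editor's sketch (not generated code): the canonical NodeContainer workflow using the
# methods registered above. Assumes ``ns.network`` is importable.
def _example_node_container():
    import ns.network
    nodes = ns.network.NodeContainer()
    nodes.Create(3)
    first = nodes.Get(0)          # an ns3::Ptr<ns3::Node>
    return nodes.GetN(), first    # GetN() -> 3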
def register_Ns3NonCopyable_methods(root_module, cls):
## non-copyable.h (module 'core'): ns3::NonCopyable::NonCopyable() [constructor]
cls.add_constructor([],
visibility='protected')
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
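# Editor's sketch (not generated code): creating an object through ObjectFactory with
# the methods registered above. The TypeId string "ns3::olsr::RoutingProtocol" is used
# here only as an illustrative value (it is the protocol this module binds); assumes
# ``ns.core`` is importable.
def _example_object_factory():
    import ns.core
    factory = ns.core.ObjectFactory()
    factory.SetTypeId("ns3::olsr::RoutingProtocol")
    obj = factory.Create()                 # an ns3::Ptr<ns3::Object>
    return factory.GetTypeId(), obj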
def register_Ns3OlsrHelper_methods(root_module, cls):
## olsr-helper.h (module 'olsr'): ns3::OlsrHelper::OlsrHelper() [constructor]
cls.add_constructor([])
## olsr-helper.h (module 'olsr'): ns3::OlsrHelper::OlsrHelper(ns3::OlsrHelper const & arg0) [constructor]
cls.add_constructor([param('ns3::OlsrHelper const &', 'arg0')])
## olsr-helper.h (module 'olsr'): ns3::OlsrHelper * ns3::OlsrHelper::Copy() const [member function]
cls.add_method('Copy',
'ns3::OlsrHelper *',
[],
is_const=True, is_virtual=True)
## olsr-helper.h (module 'olsr'): void ns3::OlsrHelper::ExcludeInterface(ns3::Ptr<ns3::Node> node, uint32_t interface) [member function]
cls.add_method('ExcludeInterface',
'void',
[param('ns3::Ptr< ns3::Node >', 'node'), param('uint32_t', 'interface')])
## olsr-helper.h (module 'olsr'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::OlsrHelper::Create(ns3::Ptr<ns3::Node> node) const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_const=True, is_virtual=True)
## olsr-helper.h (module 'olsr'): void ns3::OlsrHelper::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## olsr-helper.h (module 'olsr'): int64_t ns3::OlsrHelper::AssignStreams(ns3::NodeContainer c, int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('ns3::NodeContainer', 'c'), param('int64_t', 'stream')])
return
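# Editor's sketch (not generated code): typical OlsrHelper usage. InternetStackHelper,
# SetRoutingHelper and Install are bound in the 'internet' module and are assumptions
# here, as is the "HelloInterval" attribute name; the OlsrHelper methods themselves
# (constructor, Set) are registered above. ``nodes`` is an ns3::NodeContainer.
def _example_olsr_helper(nodes):
    import ns.core
    import ns.internet
    import ns.olsr
    olsr = ns.olsr.OlsrHelper()
    olsr.Set("HelloInterval", ns.core.TimeValue(ns.core.Seconds(2.0)))
    stack = ns.internet.InternetStackHelper()   # assumed: bound in module 'internet'
    stack.SetRoutingHelper(olsr)
    stack.Install(nodes)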
def register_Ns3PacketMetadata_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::PacketMetadata const &', 'o')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[param('ns3::Buffer', 'buffer')],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
cls.add_method('CreateFragment',
'ns3::PacketMetadata',
[param('uint32_t', 'start'), param('uint32_t', 'end')],
is_const=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
cls.add_method('Enable',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'end')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'start')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'void',
[param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
cls.add_method('RemoveTrailer',
'void',
[param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item() [constructor]
cls.add_constructor([])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::Item(ns3::PacketMetadata::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::current [variable]
cls.add_instance_attribute('current', 'ns3::Buffer::Iterator', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentSize [variable]
cls.add_instance_attribute('currentSize', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromEnd [variable]
cls.add_instance_attribute('currentTrimedFromEnd', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::currentTrimedFromStart [variable]
cls.add_instance_attribute('currentTrimedFromStart', 'uint32_t', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::isFragment [variable]
cls.add_instance_attribute('isFragment', 'bool', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item::type [variable]
cls.add_instance_attribute('type', 'ns3::PacketMetadata::Item::ItemType', is_const=False)
return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata::ItemIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator::ItemIterator(ns3::PacketMetadata const * metadata, ns3::Buffer buffer) [constructor]
cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
## packet-metadata.h (module 'network'): bool ns3::PacketMetadata::ItemIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item ns3::PacketMetadata::ItemIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketMetadata::Item',
[])
return
def register_Ns3PacketTagIterator_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::PacketTagIterator(ns3::PacketTagIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
## packet.h (module 'network'): bool ns3::PacketTagIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator::Item ns3::PacketTagIterator::Next() [member function]
cls.add_method('Next',
'ns3::PacketTagIterator::Item',
[])
return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
## packet.h (module 'network'): ns3::PacketTagIterator::Item::Item(ns3::PacketTagIterator::Item const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
## packet.h (module 'network'): void ns3::PacketTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
cls.add_method('GetTag',
'void',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::TypeId ns3::PacketTagIterator::Item::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
return
def register_Ns3PacketTagList_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [constructor]
cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
cls.add_method('Add',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
cls.add_method('Head',
'ns3::PacketTagList::TagData const *',
[],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
cls.add_method('Peek',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
cls.add_method('Remove',
'bool',
[param('ns3::Tag &', 'tag')])
## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
cls.add_method('RemoveAll',
'void',
[])
## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
cls.add_method('Replace',
'bool',
[param('ns3::Tag &', 'tag')])
return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData() [constructor]
cls.add_constructor([])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData(ns3::PacketTagList::TagData const & arg0) [constructor]
cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::count [variable]
cls.add_instance_attribute('count', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::data [variable]
cls.add_instance_attribute('data', 'uint8_t [ 1 ]', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::next [variable]
cls.add_instance_attribute('next', 'ns3::PacketTagList::TagData *', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::size [variable]
cls.add_instance_attribute('size', 'uint32_t', is_const=False)
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::tid [variable]
cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
return
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_static=True)
return
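# Hedged usage sketch (illustrative only, not part of the generated bindings):
# once this module is compiled, the static ns3::Simulator methods registered in
# register_Ns3Simulator_methods() become plain Python calls. The `ns.core`
# import path and the Seconds() helper are assumptions based on the usual ns-3
# Python layout; they are not defined in this file.
def _example_simulator_usage():
    import ns.core
    # Schedule the end of the simulation, query the clock, then clean up.
    ns.core.Simulator.Stop(ns.core.Seconds(10.0))
    now = ns.core.Simulator.Now()            # ns3::Time, registered as 'Now'
    finished = ns.core.Simulator.IsFinished()
    ns.core.Simulator.Destroy()
    return now, finished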
def register_Ns3SystemWallClockMs_methods(root_module, cls):
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs(ns3::SystemWallClockMs const & arg0) [constructor]
cls.add_constructor([param('ns3::SystemWallClockMs const &', 'arg0')])
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs() [constructor]
cls.add_constructor([])
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::End() [member function]
cls.add_method('End',
'int64_t',
[])
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedReal() const [member function]
cls.add_method('GetElapsedReal',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedSystem() const [member function]
cls.add_method('GetElapsedSystem',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedUser() const [member function]
cls.add_method('GetElapsedUser',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): void ns3::SystemWallClockMs::Start() [member function]
cls.add_method('Start',
'void',
[])
return
def register_Ns3Tag_methods(root_module, cls):
## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
cls.add_constructor([])
## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [constructor]
cls.add_constructor([param('ns3::Tag const &', 'arg0')])
## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_virtual=True)
## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t v) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t v) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
def register_Ns3Timer_methods(root_module, cls):
## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer const & arg0) [constructor]
cls.add_constructor([param('ns3::Timer const &', 'arg0')])
## timer.h (module 'core'): ns3::Timer::Timer() [constructor]
cls.add_constructor([])
## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer::DestroyPolicy destroyPolicy) [constructor]
cls.add_constructor([param('ns3::Timer::DestroyPolicy', 'destroyPolicy')])
## timer.h (module 'core'): void ns3::Timer::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelay() const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[],
is_const=True)
## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelayLeft() const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[],
is_const=True)
## timer.h (module 'core'): ns3::Timer::State ns3::Timer::GetState() const [member function]
cls.add_method('GetState',
'ns3::Timer::State',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsSuspended() const [member function]
cls.add_method('IsSuspended',
'bool',
[],
is_const=True)
## timer.h (module 'core'): void ns3::Timer::Remove() [member function]
cls.add_method('Remove',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Resume() [member function]
cls.add_method('Resume',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Schedule() [member function]
cls.add_method('Schedule',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Schedule(ns3::Time delay) [member function]
cls.add_method('Schedule',
'void',
[param('ns3::Time', 'delay')])
## timer.h (module 'core'): void ns3::Timer::SetDelay(ns3::Time const & delay) [member function]
cls.add_method('SetDelay',
'void',
[param('ns3::Time const &', 'delay')])
## timer.h (module 'core'): void ns3::Timer::Suspend() [member function]
cls.add_method('Suspend',
'void',
[])
return
def register_Ns3TimerImpl_methods(root_module, cls):
## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl() [constructor]
cls.add_constructor([])
## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl(ns3::TimerImpl const & arg0) [constructor]
cls.add_constructor([param('ns3::TimerImpl const &', 'arg0')])
## timer-impl.h (module 'core'): void ns3::TimerImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## timer-impl.h (module 'core'): ns3::EventId ns3::TimerImpl::Schedule(ns3::Time const & delay) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'delay')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(std::size_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(std::size_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::hash_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'ns3::TypeId::hash_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint16_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint16_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint16_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint16_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(std::size_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('std::size_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(ns3::TypeId::hash_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(ns3::TypeId::hash_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(std::size_t i, ns3::Ptr<const ns3::AttributeValue> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
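# Hedged usage sketch (illustrative only): the TypeId lookup and introspection
# methods registered in register_Ns3TypeId_methods() would typically be driven
# from Python roughly as below. The `ns.core` import path and the example type
# name "ns3::Object" are assumptions, not taken from this file.
def _example_typeid_introspection():
    import ns.core
    tid = ns.core.TypeId.LookupByName("ns3::Object")
    # Walk the registered attributes of the type and collect their full names.
    names = [tid.GetAttributeFullName(i) for i in range(tid.GetAttributeN())]
    return tid.GetName(), names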
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
cls.add_unary_numeric_operator('-')
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(double const value) [constructor]
cls.add_constructor([param('double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long double const value) [constructor]
cls.add_constructor([param('long double const', 'value')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int const v) [constructor]
cls.add_constructor([param('int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long int const v) [constructor]
cls.add_constructor([param('long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int const v) [constructor]
cls.add_constructor([param('long long int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int const v) [constructor]
cls.add_constructor([param('unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int const v) [constructor]
cls.add_constructor([param('long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int const v) [constructor]
cls.add_constructor([param('long long unsigned int const', 'v')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t const hi, uint64_t const lo) [constructor]
cls.add_constructor([param('int64_t const', 'hi'), param('uint64_t const', 'lo')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-128.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t const v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t const', 'v')],
is_static=True)
## int64x64-128.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-128.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
def register_Ns3Chunk_methods(root_module, cls):
## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
cls.add_constructor([])
## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [constructor]
cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Header_methods(root_module, cls):
cls.add_output_stream_operator()
## header.h (module 'network'): ns3::Header::Header() [constructor]
cls.add_constructor([])
## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [constructor]
cls.add_constructor([param('ns3::Header const &', 'arg0')])
## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_virtual=True)
## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3Ipv4Header_methods(root_module, cls):
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header(ns3::Ipv4Header const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Header const &', 'arg0')])
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::Ipv4Header() [constructor]
cls.add_constructor([])
## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::DscpTypeToString(ns3::Ipv4Header::DscpType dscp) const [member function]
cls.add_method('DscpTypeToString',
'std::string',
[param('ns3::Ipv4Header::DscpType', 'dscp')],
is_const=True)
## ipv4-header.h (module 'internet'): std::string ns3::Ipv4Header::EcnTypeToString(ns3::Ipv4Header::EcnType ecn) const [member function]
cls.add_method('EcnTypeToString',
'std::string',
[param('ns3::Ipv4Header::EcnType', 'ecn')],
is_const=True)
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::EnableChecksum() [member function]
cls.add_method('EnableChecksum',
'void',
[])
## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetDestination() const [member function]
cls.add_method('GetDestination',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::DscpType ns3::Ipv4Header::GetDscp() const [member function]
cls.add_method('GetDscp',
'ns3::Ipv4Header::DscpType',
[],
is_const=True)
## ipv4-header.h (module 'internet'): ns3::Ipv4Header::EcnType ns3::Ipv4Header::GetEcn() const [member function]
cls.add_method('GetEcn',
'ns3::Ipv4Header::EcnType',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetFragmentOffset() const [member function]
cls.add_method('GetFragmentOffset',
'uint16_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetIdentification() const [member function]
cls.add_method('GetIdentification',
'uint16_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): ns3::TypeId ns3::Ipv4Header::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## ipv4-header.h (module 'internet'): uint16_t ns3::Ipv4Header::GetPayloadSize() const [member function]
cls.add_method('GetPayloadSize',
'uint16_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetProtocol() const [member function]
cls.add_method('GetProtocol',
'uint8_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint32_t ns3::Ipv4Header::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## ipv4-header.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Header::GetSource() const [member function]
cls.add_method('GetSource',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTos() const [member function]
cls.add_method('GetTos',
'uint8_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): uint8_t ns3::Ipv4Header::GetTtl() const [member function]
cls.add_method('GetTtl',
'uint8_t',
[],
is_const=True)
## ipv4-header.h (module 'internet'): static ns3::TypeId ns3::Ipv4Header::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsChecksumOk() const [member function]
cls.add_method('IsChecksumOk',
'bool',
[],
is_const=True)
## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsDontFragment() const [member function]
cls.add_method('IsDontFragment',
'bool',
[],
is_const=True)
## ipv4-header.h (module 'internet'): bool ns3::Ipv4Header::IsLastFragment() const [member function]
cls.add_method('IsLastFragment',
'bool',
[],
is_const=True)
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDestination(ns3::Ipv4Address destination) [member function]
cls.add_method('SetDestination',
'void',
[param('ns3::Ipv4Address', 'destination')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDontFragment() [member function]
cls.add_method('SetDontFragment',
'void',
[])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetDscp(ns3::Ipv4Header::DscpType dscp) [member function]
cls.add_method('SetDscp',
'void',
[param('ns3::Ipv4Header::DscpType', 'dscp')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetEcn(ns3::Ipv4Header::EcnType ecn) [member function]
cls.add_method('SetEcn',
'void',
[param('ns3::Ipv4Header::EcnType', 'ecn')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetFragmentOffset(uint16_t offsetBytes) [member function]
cls.add_method('SetFragmentOffset',
'void',
[param('uint16_t', 'offsetBytes')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetIdentification(uint16_t identification) [member function]
cls.add_method('SetIdentification',
'void',
[param('uint16_t', 'identification')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetLastFragment() [member function]
cls.add_method('SetLastFragment',
'void',
[])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMayFragment() [member function]
cls.add_method('SetMayFragment',
'void',
[])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetMoreFragments() [member function]
cls.add_method('SetMoreFragments',
'void',
[])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetPayloadSize(uint16_t size) [member function]
cls.add_method('SetPayloadSize',
'void',
[param('uint16_t', 'size')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetProtocol(uint8_t num) [member function]
cls.add_method('SetProtocol',
'void',
[param('uint8_t', 'num')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetSource(ns3::Ipv4Address source) [member function]
cls.add_method('SetSource',
'void',
[param('ns3::Ipv4Address', 'source')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTos(uint8_t tos) [member function]
cls.add_method('SetTos',
'void',
[param('uint8_t', 'tos')])
## ipv4-header.h (module 'internet'): void ns3::Ipv4Header::SetTtl(uint8_t ttl) [member function]
cls.add_method('SetTtl',
'void',
[param('uint8_t', 'ttl')])
return
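# Hedged usage sketch (illustrative only): the Ipv4Header accessors registered
# above map to plain Python methods on the wrapped class. The `ns.internet`
# module path is an assumption based on the module name recorded in the
# comments ('internet').
def _example_ipv4_header_usage():
    import ns.internet
    header = ns.internet.Ipv4Header()
    header.SetTtl(64)
    header.SetProtocol(17)        # UDP protocol number
    header.SetPayloadSize(512)
    return header.GetSerializedSize()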
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<const ns3::Object> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3RandomVariableStream_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function]
cls.add_method('SetStream',
'void',
[param('int64_t', 'stream')])
## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function]
cls.add_method('GetStream',
'int64_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function]
cls.add_method('SetAntithetic',
'void',
[param('bool', 'isAntithetic')])
## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function]
cls.add_method('IsAntithetic',
'bool',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function]
cls.add_method('Peek',
'ns3::RngStream *',
[],
is_const=True, visibility='protected')
return
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function]
cls.add_method('GetIncrement',
'ns3::Ptr< ns3::RandomVariableStream >',
[],
is_const=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function]
cls.add_method('GetConsecutive',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4Route > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
return
def register_Ns3Socket_methods(root_module, cls):
## socket.h (module 'network'): ns3::Socket::Socket(ns3::Socket const & arg0) [constructor]
cls.add_constructor([param('ns3::Socket const &', 'arg0')])
## socket.h (module 'network'): ns3::Socket::Socket() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): int ns3::Socket::Bind(ns3::Address const & address) [member function]
cls.add_method('Bind',
'int',
[param('ns3::Address const &', 'address')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Bind() [member function]
cls.add_method('Bind',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Bind6() [member function]
cls.add_method('Bind6',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::BindToNetDevice(ns3::Ptr<ns3::NetDevice> netdevice) [member function]
cls.add_method('BindToNetDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'netdevice')],
is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Close() [member function]
cls.add_method('Close',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Connect(ns3::Address const & address) [member function]
cls.add_method('Connect',
'int',
[param('ns3::Address const &', 'address')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): static ns3::Ptr<ns3::Socket> ns3::Socket::CreateSocket(ns3::Ptr<ns3::Node> node, ns3::TypeId tid) [member function]
cls.add_method('CreateSocket',
'ns3::Ptr< ns3::Socket >',
[param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::TypeId', 'tid')],
is_static=True)
## socket.h (module 'network'): bool ns3::Socket::GetAllowBroadcast() const [member function]
cls.add_method('GetAllowBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Socket::GetBoundNetDevice() [member function]
cls.add_method('GetBoundNetDevice',
'ns3::Ptr< ns3::NetDevice >',
[])
## socket.h (module 'network'): ns3::Socket::SocketErrno ns3::Socket::GetErrno() const [member function]
cls.add_method('GetErrno',
'ns3::Socket::SocketErrno',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTos() const [member function]
cls.add_method('GetIpTos',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpTtl() const [member function]
cls.add_method('GetIpTtl',
'uint8_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6HopLimit() const [member function]
cls.add_method('GetIpv6HopLimit',
'uint8_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetIpv6Tclass() const [member function]
cls.add_method('GetIpv6Tclass',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Node> ns3::Socket::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::GetPeerName(ns3::Address & address) const [member function]
cls.add_method('GetPeerName',
'int',
[param('ns3::Address &', 'address')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::Socket::GetPriority() const [member function]
cls.add_method('GetPriority',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): uint32_t ns3::Socket::GetRxAvailable() const [member function]
cls.add_method('GetRxAvailable',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::GetSockName(ns3::Address & address) const [member function]
cls.add_method('GetSockName',
'int',
[param('ns3::Address &', 'address')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): ns3::Socket::SocketType ns3::Socket::GetSocketType() const [member function]
cls.add_method('GetSocketType',
'ns3::Socket::SocketType',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::Socket::GetTxAvailable() const [member function]
cls.add_method('GetTxAvailable',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## socket.h (module 'network'): static ns3::TypeId ns3::Socket::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): static uint8_t ns3::Socket::IpTos2Priority(uint8_t ipTos) [member function]
cls.add_method('IpTos2Priority',
'uint8_t',
[param('uint8_t', 'ipTos')],
is_static=True)
## socket.h (module 'network'): void ns3::Socket::Ipv6JoinGroup(ns3::Ipv6Address address, ns3::Socket::Ipv6MulticastFilterMode filterMode, std::vector<ns3::Ipv6Address, std::allocator<ns3::Ipv6Address> > sourceAddresses) [member function]
cls.add_method('Ipv6JoinGroup',
'void',
[param('ns3::Ipv6Address', 'address'), param('ns3::Socket::Ipv6MulticastFilterMode', 'filterMode'), param('std::vector< ns3::Ipv6Address >', 'sourceAddresses')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::Ipv6JoinGroup(ns3::Ipv6Address address) [member function]
cls.add_method('Ipv6JoinGroup',
'void',
[param('ns3::Ipv6Address', 'address')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::Ipv6LeaveGroup() [member function]
cls.add_method('Ipv6LeaveGroup',
'void',
[],
is_virtual=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTos() const [member function]
cls.add_method('IsIpRecvTos',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpRecvTtl() const [member function]
cls.add_method('IsIpRecvTtl',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvHopLimit() const [member function]
cls.add_method('IsIpv6RecvHopLimit',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsIpv6RecvTclass() const [member function]
cls.add_method('IsIpv6RecvTclass',
'bool',
[],
is_const=True)
## socket.h (module 'network'): bool ns3::Socket::IsRecvPktInfo() const [member function]
cls.add_method('IsRecvPktInfo',
'bool',
[],
is_const=True)
## socket.h (module 'network'): int ns3::Socket::Listen() [member function]
cls.add_method('Listen',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv(uint32_t maxSize, uint32_t flags) [member function]
cls.add_method('Recv',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'maxSize'), param('uint32_t', 'flags')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::Recv() [member function]
cls.add_method('Recv',
'ns3::Ptr< ns3::Packet >',
[])
## socket.h (module 'network'): int ns3::Socket::Recv(uint8_t * buf, uint32_t size, uint32_t flags) [member function]
cls.add_method('Recv',
'int',
[param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(uint32_t maxSize, uint32_t flags, ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'maxSize'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Socket::RecvFrom(ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'ns3::Ptr< ns3::Packet >',
[param('ns3::Address &', 'fromAddress')])
## socket.h (module 'network'): int ns3::Socket::RecvFrom(uint8_t * buf, uint32_t size, uint32_t flags, ns3::Address & fromAddress) [member function]
cls.add_method('RecvFrom',
'int',
[param('uint8_t *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address &', 'fromAddress')])
## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p, uint32_t flags) [member function]
cls.add_method('Send',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::Send(ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('Send',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p')])
## socket.h (module 'network'): int ns3::Socket::Send(uint8_t const * buf, uint32_t size, uint32_t flags) [member function]
cls.add_method('Send',
'int',
[param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags')])
## socket.h (module 'network'): int ns3::Socket::SendTo(ns3::Ptr<ns3::Packet> p, uint32_t flags, ns3::Address const & toAddress) [member function]
cls.add_method('SendTo',
'int',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('uint32_t', 'flags'), param('ns3::Address const &', 'toAddress')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::SendTo(uint8_t const * buf, uint32_t size, uint32_t flags, ns3::Address const & address) [member function]
cls.add_method('SendTo',
'int',
[param('uint8_t const *', 'buf'), param('uint32_t', 'size'), param('uint32_t', 'flags'), param('ns3::Address const &', 'address')])
## socket.h (module 'network'): void ns3::Socket::SetAcceptCallback(ns3::Callback<bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionRequest, ns3::Callback<void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> newConnectionCreated) [member function]
cls.add_method('SetAcceptCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionRequest'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'newConnectionCreated')])
## socket.h (module 'network'): bool ns3::Socket::SetAllowBroadcast(bool allowBroadcast) [member function]
cls.add_method('SetAllowBroadcast',
'bool',
[param('bool', 'allowBroadcast')],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetCloseCallbacks(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> normalClose, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> errorClose) [member function]
cls.add_method('SetCloseCallbacks',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'normalClose'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'errorClose')])
## socket.h (module 'network'): void ns3::Socket::SetConnectCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionSucceeded, ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> connectionFailed) [member function]
cls.add_method('SetConnectCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionSucceeded'), param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'connectionFailed')])
## socket.h (module 'network'): void ns3::Socket::SetDataSentCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> dataSent) [member function]
cls.add_method('SetDataSentCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'dataSent')])
## socket.h (module 'network'): void ns3::Socket::SetIpRecvTos(bool ipv4RecvTos) [member function]
cls.add_method('SetIpRecvTos',
'void',
[param('bool', 'ipv4RecvTos')])
## socket.h (module 'network'): void ns3::Socket::SetIpRecvTtl(bool ipv4RecvTtl) [member function]
cls.add_method('SetIpRecvTtl',
'void',
[param('bool', 'ipv4RecvTtl')])
## socket.h (module 'network'): void ns3::Socket::SetIpTos(uint8_t ipTos) [member function]
cls.add_method('SetIpTos',
'void',
[param('uint8_t', 'ipTos')])
## socket.h (module 'network'): void ns3::Socket::SetIpTtl(uint8_t ipTtl) [member function]
cls.add_method('SetIpTtl',
'void',
[param('uint8_t', 'ipTtl')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetIpv6HopLimit(uint8_t ipHopLimit) [member function]
cls.add_method('SetIpv6HopLimit',
'void',
[param('uint8_t', 'ipHopLimit')],
is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvHopLimit(bool ipv6RecvHopLimit) [member function]
cls.add_method('SetIpv6RecvHopLimit',
'void',
[param('bool', 'ipv6RecvHopLimit')])
## socket.h (module 'network'): void ns3::Socket::SetIpv6RecvTclass(bool ipv6RecvTclass) [member function]
cls.add_method('SetIpv6RecvTclass',
'void',
[param('bool', 'ipv6RecvTclass')])
## socket.h (module 'network'): void ns3::Socket::SetIpv6Tclass(int ipTclass) [member function]
cls.add_method('SetIpv6Tclass',
'void',
[param('int', 'ipTclass')])
## socket.h (module 'network'): void ns3::Socket::SetPriority(uint8_t priority) [member function]
cls.add_method('SetPriority',
'void',
[param('uint8_t', 'priority')])
## socket.h (module 'network'): void ns3::Socket::SetRecvCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> arg0) [member function]
cls.add_method('SetRecvCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'arg0')])
## socket.h (module 'network'): void ns3::Socket::SetRecvPktInfo(bool flag) [member function]
cls.add_method('SetRecvPktInfo',
'void',
[param('bool', 'flag')])
## socket.h (module 'network'): void ns3::Socket::SetSendCallback(ns3::Callback<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> sendCb) [member function]
cls.add_method('SetSendCallback',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'sendCb')])
## socket.h (module 'network'): int ns3::Socket::ShutdownRecv() [member function]
cls.add_method('ShutdownRecv',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): int ns3::Socket::ShutdownSend() [member function]
cls.add_method('ShutdownSend',
'int',
[],
is_pure_virtual=True, is_virtual=True)
## socket.h (module 'network'): void ns3::Socket::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## socket.h (module 'network'): bool ns3::Socket::IsManualIpTtl() const [member function]
cls.add_method('IsManualIpTtl',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6HopLimit() const [member function]
cls.add_method('IsManualIpv6HopLimit',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::IsManualIpv6Tclass() const [member function]
cls.add_method('IsManualIpv6Tclass',
'bool',
[],
is_const=True, visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyConnectionFailed() [member function]
cls.add_method('NotifyConnectionFailed',
'void',
[],
visibility='protected')
## socket.h (module 'network'): bool ns3::Socket::NotifyConnectionRequest(ns3::Address const & from) [member function]
cls.add_method('NotifyConnectionRequest',
'bool',
[param('ns3::Address const &', 'from')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyConnectionSucceeded() [member function]
cls.add_method('NotifyConnectionSucceeded',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyDataRecv() [member function]
cls.add_method('NotifyDataRecv',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyDataSent(uint32_t size) [member function]
cls.add_method('NotifyDataSent',
'void',
[param('uint32_t', 'size')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyErrorClose() [member function]
cls.add_method('NotifyErrorClose',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyNewConnectionCreated(ns3::Ptr<ns3::Socket> socket, ns3::Address const & from) [member function]
cls.add_method('NotifyNewConnectionCreated',
'void',
[param('ns3::Ptr< ns3::Socket >', 'socket'), param('ns3::Address const &', 'from')],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifyNormalClose() [member function]
cls.add_method('NotifyNormalClose',
'void',
[],
visibility='protected')
## socket.h (module 'network'): void ns3::Socket::NotifySend(uint32_t spaceAvailable) [member function]
cls.add_method('NotifySend',
'void',
[param('uint32_t', 'spaceAvailable')],
visibility='protected')
return
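# Every entry in register_Ns3Socket_methods() above follows the same pybindgen
# pattern: cls.add_method(<C++ name>, <return type>, <list of param()>, <flags>),
# where is_pure_virtual / is_virtual / is_const / is_static / visibility mirror
# the declaration in socket.h. The minimal sketch below restates that pattern by
# hand for reference only: the method names are hypothetical, the function is
# never called by the generated code, and `param` is assumed to be imported from
# pybindgen at the top of this generated module.
def _sketch_add_method_pattern(cls):
    # Hypothetical accessor: uint32_t ns3::Socket::GetExampleCounter() const
    cls.add_method('GetExampleCounter',
                   'uint32_t',
                   [],                        # no parameters
                   is_const=True)             # const, non-virtual member function
    # Hypothetical setter taking one argument, registered the same way.
    cls.add_method('SetExampleCounter',
                   'void',
                   [param('uint32_t', 'counter')])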
def register_Ns3SocketIpTosTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketIpTosTag::SocketIpTosTag(ns3::SocketIpTosTag const & arg0) [constructor]
cls.add_constructor([param('ns3::SocketIpTosTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketIpTosTag::SocketIpTosTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketIpTosTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTosTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::SocketIpTosTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::SocketIpTosTag::GetTos() const [member function]
cls.add_method('GetTos',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTosTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): void ns3::SocketIpTosTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpTosTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpTosTag::SetTos(uint8_t tos) [member function]
cls.add_method('SetTos',
'void',
[param('uint8_t', 'tos')])
return
def register_Ns3SocketIpTtlTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag(ns3::SocketIpTtlTag const & arg0) [constructor]
cls.add_constructor([param('ns3::SocketIpTtlTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketIpTtlTag::SocketIpTtlTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketIpTtlTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): ns3::TypeId ns3::SocketIpTtlTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::SocketIpTtlTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::SocketIpTtlTag::GetTtl() const [member function]
cls.add_method('GetTtl',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpTtlTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): void ns3::SocketIpTtlTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpTtlTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpTtlTag::SetTtl(uint8_t ttl) [member function]
cls.add_method('SetTtl',
'void',
[param('uint8_t', 'ttl')])
return
def register_Ns3SocketIpv6HopLimitTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag::SocketIpv6HopLimitTag(ns3::SocketIpv6HopLimitTag const & arg0) [constructor]
cls.add_constructor([param('ns3::SocketIpv6HopLimitTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketIpv6HopLimitTag::SocketIpv6HopLimitTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::SocketIpv6HopLimitTag::GetHopLimit() const [member function]
cls.add_method('GetHopLimit',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): ns3::TypeId ns3::SocketIpv6HopLimitTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::SocketIpv6HopLimitTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpv6HopLimitTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpv6HopLimitTag::SetHopLimit(uint8_t hopLimit) [member function]
cls.add_method('SetHopLimit',
'void',
[param('uint8_t', 'hopLimit')])
return
def register_Ns3SocketIpv6TclassTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketIpv6TclassTag::SocketIpv6TclassTag(ns3::SocketIpv6TclassTag const & arg0) [constructor]
cls.add_constructor([param('ns3::SocketIpv6TclassTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketIpv6TclassTag::SocketIpv6TclassTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): ns3::TypeId ns3::SocketIpv6TclassTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::SocketIpv6TclassTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::SocketIpv6TclassTag::GetTclass() const [member function]
cls.add_method('GetTclass',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketIpv6TclassTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketIpv6TclassTag::SetTclass(uint8_t tclass) [member function]
cls.add_method('SetTclass',
'void',
[param('uint8_t', 'tclass')])
return
def register_Ns3SocketPriorityTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketPriorityTag::SocketPriorityTag(ns3::SocketPriorityTag const & arg0) [constructor]
cls.add_constructor([param('ns3::SocketPriorityTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketPriorityTag::SocketPriorityTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketPriorityTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): ns3::TypeId ns3::SocketPriorityTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint8_t ns3::SocketPriorityTag::GetPriority() const [member function]
cls.add_method('GetPriority',
'uint8_t',
[],
is_const=True)
## socket.h (module 'network'): uint32_t ns3::SocketPriorityTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketPriorityTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): void ns3::SocketPriorityTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketPriorityTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketPriorityTag::SetPriority(uint8_t priority) [member function]
cls.add_method('SetPriority',
'void',
[param('uint8_t', 'priority')])
return
def register_Ns3SocketSetDontFragmentTag_methods(root_module, cls):
## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag(ns3::SocketSetDontFragmentTag const & arg0) [constructor]
cls.add_constructor([param('ns3::SocketSetDontFragmentTag const &', 'arg0')])
## socket.h (module 'network'): ns3::SocketSetDontFragmentTag::SocketSetDontFragmentTag() [constructor]
cls.add_constructor([])
## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Deserialize(ns3::TagBuffer i) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_virtual=True)
## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Disable() [member function]
cls.add_method('Disable',
'void',
[])
## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Enable() [member function]
cls.add_method('Enable',
'void',
[])
## socket.h (module 'network'): ns3::TypeId ns3::SocketSetDontFragmentTag::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): uint32_t ns3::SocketSetDontFragmentTag::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## socket.h (module 'network'): static ns3::TypeId ns3::SocketSetDontFragmentTag::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## socket.h (module 'network'): bool ns3::SocketSetDontFragmentTag::IsEnabled() const [member function]
cls.add_method('IsEnabled',
'bool',
[],
is_const=True)
## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## socket.h (module 'network'): void ns3::SocketSetDontFragmentTag::Serialize(ns3::TagBuffer i) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'i')],
is_const=True, is_virtual=True)
return
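# The Socket*Tag registrations above (SocketIpTosTag, SocketIpTtlTag,
# SocketIpv6HopLimitTag, SocketIpv6TclassTag, SocketPriorityTag,
# SocketSetDontFragmentTag) all repeat the same ns3::Tag interface plus one
# value accessor pair. The generated code intentionally spells each class out;
# a hand-maintained binding could factor the shared part as sketched here.
# Illustrative only, never called, and `param` is assumed to be imported from
# pybindgen at the top of this generated module.
def _sketch_register_tag_interface(cls, class_name):
    cls.add_constructor([])
    cls.add_constructor([param('%s const &' % class_name, 'arg0')])
    cls.add_method('Deserialize', 'void', [param('ns3::TagBuffer', 'i')], is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::TagBuffer', 'i')], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)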
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('>=')
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
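# register_Ns3Time_methods() above registers Python operator support before the
# regular methods: add_binary_comparison_operator() wires ==, !=, <, <=, >, >=,
# add_binary_numeric_operator() wires + and - between two Time values (and * and
# / against an int64_t scalar), add_inplace_numeric_operator() wires += and -=,
# and add_output_stream_operator() backs str() via operator<<. The sketch below
# repeats a subset of those calls in isolation; it is illustrative only, never
# called, and assumes `param` is imported from pybindgen as usual for this
# generated module.
def _sketch_register_time_like_operators(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('<')
    # Result type, left-hand type and right-hand parameter, as for ns3::Time above.
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'],
                                    param('ns3::Time const &', 'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
    cls.add_output_stream_operator()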
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
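# The TraceSourceAccessor registrations above pass raw-pointer arguments as
# param('ns3::ObjectBase *', 'obj', transfer_ownership=False), so the generated
# wrapper does not hand ownership of the passed C++ object to the callee, which
# is what Connect()/Disconnect() expect. A minimal sketch of the same idiom
# (hypothetical method name, never called, `param` assumed imported from
# pybindgen):
def _sketch_raw_pointer_param(cls):
    cls.add_method('InspectObject',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False)],
                   is_const=True)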
def register_Ns3Trailer_methods(root_module, cls):
cls.add_output_stream_operator()
## trailer.h (module 'network'): ns3::Trailer::Trailer() [constructor]
cls.add_constructor([])
## trailer.h (module 'network'): ns3::Trailer::Trailer(ns3::Trailer const & arg0) [constructor]
cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'end')],
is_pure_virtual=True, is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::Deserialize(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')],
is_virtual=True)
## trailer.h (module 'network'): uint32_t ns3::Trailer::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): static ns3::TypeId ns3::Trailer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## trailer.h (module 'network'): void ns3::Trailer::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trailer.h (module 'network'): void ns3::Trailer::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3UniformRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
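# The random-variable registrations above expose both the parameterless
# GetValue()/GetInteger() overrides and the explicit-range overloads, so a
# Python simulation script can call the class directly once the bindings are
# built. A hedged usage sketch (assuming the usual `ns.core` module layout that
# these generated bindings are compiled into; defined here for illustration and
# never executed by this module):
def _sketch_uniform_random_variable_usage():
    import ns.core
    uv = ns.core.UniformRandomVariable()   # default constructor registered above
    sample = uv.GetValue(0.0, 10.0)        # double GetValue(double min, double max)
    roll = uv.GetInteger(1, 6)             # uint32_t GetInteger(uint32_t min, uint32_t max)
    return sample, roll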
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'n'), param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'n'), param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::ObjectBase*'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'void'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::NetDevice> '])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Packet const> '])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'unsigned short'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::Address const&'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::NetDevice::PacketType'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::Ptr<ns3::Socket> '])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'bool'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'unsigned int'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'ns3::olsr::PacketHeader const&'])
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function]
cls.add_method('GetCppTypeid',
'std::string',
[],
is_static=True, visibility='protected', template_parameters=[u'std::vector<ns3::olsr::MessageHeader', u' std::allocator<ns3::olsr::MessageHeader> > const&'])
return
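# The repeated GetCppTypeid registrations above are explicit instantiations of
# the templated static member ns3::CallbackImplBase::GetCppTypeid<T>(); the
# template_parameters keyword tells pybindgen which T each wrapper stands for,
# and the is_static / visibility='protected' flags match the C++ declaration.
# Registering one more instantiation would follow the same shape (hypothetical
# template argument, illustrative only, never called):
def _sketch_register_template_instantiation(cls):
    cls.add_method('GetCppTypeid',
                   'std::string',
                   [],
                   is_static=True, visibility='protected',
                   template_parameters=[u'double'])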
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function]
cls.add_method('GetConstant',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'constant')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'constant')])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, std::size_t length) [member function]
cls.add_method('SetValueArray',
'void',
[param('double *', 'values'), param('std::size_t', 'length')])
## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function]
cls.add_method('CDF',
'void',
[param('double', 'v'), param('double', 'c')])
## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double c1, double c2, double v1, double v2, double r) [member function]
cls.add_method('Interpolate',
'double',
[param('double', 'c1'), param('double', 'c2'), param('double', 'v1'), param('double', 'v2'), param('double', 'r')],
visibility='private', is_virtual=True)
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function]
cls.add_method('Validate',
'void',
[],
visibility='private', is_virtual=True)
return
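# Illustrative sketch (not generated code): building an empirical distribution with the
# CDF() method registered above; assumes the built bindings are importable as `ns.core`.
# Defined but never called here.
def _example_empirical_random_variable_usage():
    import ns.core
    erv = ns.core.EmpiricalRandomVariable()
    # Each CDF(value, cumulativeProbability) call adds one point of the empirical CDF.
    erv.CDF(0.0, 0.0)
    erv.CDF(5.0, 0.5)
    erv.CDF(10.0, 1.0)
    return erv.GetValue()  # draws a value in [0, 10] according to the CDF above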
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function]
cls.add_method('GetK',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'k'), param('double', 'lambda')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'k'), param('uint32_t', 'lambda')])
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
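# Illustrative sketch (not generated code): the two GetValue() overloads registered above,
# one driven by attributes and one by explicit parameters; assumes the built bindings are
# importable as `ns.core`. Defined but never called here.
def _example_exponential_random_variable_usage():
    import ns.core
    erv = ns.core.ExponentialRandomVariable()
    erv.SetAttribute("Mean", ns.core.DoubleValue(2.0))    # mean of the distribution
    erv.SetAttribute("Bound", ns.core.DoubleValue(10.0))  # upper bound on returned values
    draw_from_attributes = erv.GetValue()          # uses the Mean/Bound attributes
    draw_with_parameters = erv.GetValue(3.0, 20.0) # explicit mean=3.0, bound=20.0
    return draw_from_attributes, draw_with_parameters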
def register_Ns3GammaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function]
cls.add_method('GetBeta',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha'), param('double', 'beta')])
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3Ipv4_methods(root_module, cls):
## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4(ns3::Ipv4 const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4 const &', 'arg0')])
## ipv4.h (module 'internet'): ns3::Ipv4::Ipv4() [constructor]
cls.add_constructor([])
## ipv4.h (module 'internet'): bool ns3::Ipv4::AddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('AddAddress',
'bool',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::AddInterface(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddInterface',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ptr<ns3::Socket> ns3::Ipv4::CreateRawSocket() [member function]
cls.add_method('CreateRawSocket',
'ns3::Ptr< ns3::Socket >',
[],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::DeleteRawSocket(ns3::Ptr<ns3::Socket> socket) [member function]
cls.add_method('DeleteRawSocket',
'void',
[param('ns3::Ptr< ns3::Socket >', 'socket')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ipv4InterfaceAddress ns3::Ipv4::GetAddress(uint32_t interface, uint32_t addressIndex) const [member function]
cls.add_method('GetAddress',
'ns3::Ipv4InterfaceAddress',
[param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForAddress(ns3::Ipv4Address address) const [member function]
cls.add_method('GetInterfaceForAddress',
'int32_t',
[param('ns3::Ipv4Address', 'address')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForDevice(ns3::Ptr<const ns3::NetDevice> device) const [member function]
cls.add_method('GetInterfaceForDevice',
'int32_t',
[param('ns3::Ptr< ns3::NetDevice const >', 'device')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): int32_t ns3::Ipv4::GetInterfaceForPrefix(ns3::Ipv4Address address, ns3::Ipv4Mask mask) const [member function]
cls.add_method('GetInterfaceForPrefix',
'int32_t',
[param('ns3::Ipv4Address', 'address'), param('ns3::Ipv4Mask', 'mask')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMetric(uint32_t interface) const [member function]
cls.add_method('GetMetric',
'uint16_t',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): uint16_t ns3::Ipv4::GetMtu(uint32_t interface) const [member function]
cls.add_method('GetMtu',
'uint16_t',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNAddresses(uint32_t interface) const [member function]
cls.add_method('GetNAddresses',
'uint32_t',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): uint32_t ns3::Ipv4::GetNInterfaces() const [member function]
cls.add_method('GetNInterfaces',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4::GetNetDevice(uint32_t interface) [member function]
cls.add_method('GetNetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber) const [member function]
cls.add_method('GetProtocol',
'ns3::Ptr< ns3::IpL4Protocol >',
[param('int', 'protocolNumber')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ptr<ns3::IpL4Protocol> ns3::Ipv4::GetProtocol(int protocolNumber, int32_t interfaceIndex) const [member function]
cls.add_method('GetProtocol',
'ns3::Ptr< ns3::IpL4Protocol >',
[param('int', 'protocolNumber'), param('int32_t', 'interfaceIndex')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4::GetRoutingProtocol() const [member function]
cls.add_method('GetRoutingProtocol',
'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): static ns3::TypeId ns3::Ipv4::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::Insert(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::IsDestinationAddress(ns3::Ipv4Address address, uint32_t iif) const [member function]
cls.add_method('IsDestinationAddress',
'bool',
[param('ns3::Ipv4Address', 'address'), param('uint32_t', 'iif')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::IsForwarding(uint32_t interface) const [member function]
cls.add_method('IsForwarding',
'bool',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::IsUp(uint32_t interface) const [member function]
cls.add_method('IsUp',
'bool',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::Remove(ns3::Ptr<ns3::IpL4Protocol> protocol, uint32_t interfaceIndex) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Ptr< ns3::IpL4Protocol >', 'protocol'), param('uint32_t', 'interfaceIndex')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, uint32_t addressIndex) [member function]
cls.add_method('RemoveAddress',
'bool',
[param('uint32_t', 'interface'), param('uint32_t', 'addressIndex')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::RemoveAddress(uint32_t interface, ns3::Ipv4Address address) [member function]
cls.add_method('RemoveAddress',
'bool',
[param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SelectSourceAddress(ns3::Ptr<const ns3::NetDevice> device, ns3::Ipv4Address dst, ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e scope) [member function]
cls.add_method('SelectSourceAddress',
'ns3::Ipv4Address',
[param('ns3::Ptr< ns3::NetDevice const >', 'device'), param('ns3::Ipv4Address', 'dst'), param('ns3::Ipv4InterfaceAddress::InterfaceAddressScope_e', 'scope')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::Send(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Address source, ns3::Ipv4Address destination, uint8_t protocol, ns3::Ptr<ns3::Ipv4Route> route) [member function]
cls.add_method('Send',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Address', 'destination'), param('uint8_t', 'protocol'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SendWithHeader(ns3::Ptr<ns3::Packet> packet, ns3::Ipv4Header ipHeader, ns3::Ptr<ns3::Ipv4Route> route) [member function]
cls.add_method('SendWithHeader',
'void',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ipv4Header', 'ipHeader'), param('ns3::Ptr< ns3::Ipv4Route >', 'route')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetDown(uint32_t interface) [member function]
cls.add_method('SetDown',
'void',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetForwarding(uint32_t interface, bool val) [member function]
cls.add_method('SetForwarding',
'void',
[param('uint32_t', 'interface'), param('bool', 'val')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetMetric(uint32_t interface, uint16_t metric) [member function]
cls.add_method('SetMetric',
'void',
[param('uint32_t', 'interface'), param('uint16_t', 'metric')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol) [member function]
cls.add_method('SetRoutingProtocol',
'void',
[param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetUp(uint32_t interface) [member function]
cls.add_method('SetUp',
'void',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4::SourceAddressSelection(uint32_t interface, ns3::Ipv4Address dest) [member function]
cls.add_method('SourceAddressSelection',
'ns3::Ipv4Address',
[param('uint32_t', 'interface'), param('ns3::Ipv4Address', 'dest')],
is_pure_virtual=True, is_virtual=True)
## ipv4.h (module 'internet'): ns3::Ipv4::IF_ANY [variable]
cls.add_static_attribute('IF_ANY', 'uint32_t const', is_const=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::GetIpForward() const [member function]
cls.add_method('GetIpForward',
'bool',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## ipv4.h (module 'internet'): bool ns3::Ipv4::GetWeakEsModel() const [member function]
cls.add_method('GetWeakEsModel',
'bool',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetIpForward(bool forward) [member function]
cls.add_method('SetIpForward',
'void',
[param('bool', 'forward')],
is_pure_virtual=True, visibility='private', is_virtual=True)
## ipv4.h (module 'internet'): void ns3::Ipv4::SetWeakEsModel(bool model) [member function]
cls.add_method('SetWeakEsModel',
'void',
[param('bool', 'model')],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
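# Illustrative sketch (not generated code): the attribute-value wrapper pattern registered
# above for Ipv4AddressValue, which repeats below for Ipv4Mask, Ipv6Address, Ipv6Prefix and
# Mac48Address; assumes the built bindings are importable as `ns.network`. Never called here.
def _example_ipv4_address_value_usage():
    import ns.network
    # Wrap a plain Ipv4Address so it can travel through the ns-3 attribute system.
    val = ns.network.Ipv4AddressValue(ns.network.Ipv4Address("192.168.1.1"))
    return val.Get()  # recovers the wrapped ns3::Ipv4Address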
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv4MulticastRoute_methods(root_module, cls):
## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute(ns3::Ipv4MulticastRoute const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4MulticastRoute const &', 'arg0')])
## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::Ipv4MulticastRoute() [constructor]
cls.add_constructor([])
## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetGroup() const [member function]
cls.add_method('GetGroup',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4MulticastRoute::GetOrigin() const [member function]
cls.add_method('GetOrigin',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-route.h (module 'internet'): std::map<unsigned int, unsigned int, std::less<unsigned int>, std::allocator<std::pair<const unsigned int, unsigned int> > > ns3::Ipv4MulticastRoute::GetOutputTtlMap() const [member function]
cls.add_method('GetOutputTtlMap',
'std::map< unsigned int, unsigned int >',
[],
is_const=True)
## ipv4-route.h (module 'internet'): uint32_t ns3::Ipv4MulticastRoute::GetParent() const [member function]
cls.add_method('GetParent',
'uint32_t',
[],
is_const=True)
## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetGroup(ns3::Ipv4Address const group) [member function]
cls.add_method('SetGroup',
'void',
[param('ns3::Ipv4Address const', 'group')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOrigin(ns3::Ipv4Address const origin) [member function]
cls.add_method('SetOrigin',
'void',
[param('ns3::Ipv4Address const', 'origin')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetOutputTtl(uint32_t oif, uint32_t ttl) [member function]
cls.add_method('SetOutputTtl',
'void',
[param('uint32_t', 'oif'), param('uint32_t', 'ttl')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4MulticastRoute::SetParent(uint32_t iif) [member function]
cls.add_method('SetParent',
'void',
[param('uint32_t', 'iif')])
## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_INTERFACES [variable]
cls.add_static_attribute('MAX_INTERFACES', 'uint32_t const', is_const=True)
## ipv4-route.h (module 'internet'): ns3::Ipv4MulticastRoute::MAX_TTL [variable]
cls.add_static_attribute('MAX_TTL', 'uint32_t const', is_const=True)
return
def register_Ns3Ipv4Route_methods(root_module, cls):
cls.add_output_stream_operator()
## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route(ns3::Ipv4Route const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4Route const &', 'arg0')])
## ipv4-route.h (module 'internet'): ns3::Ipv4Route::Ipv4Route() [constructor]
cls.add_constructor([])
## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetDestination() const [member function]
cls.add_method('GetDestination',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetGateway() const [member function]
cls.add_method('GetGateway',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-route.h (module 'internet'): ns3::Ptr<ns3::NetDevice> ns3::Ipv4Route::GetOutputDevice() const [member function]
cls.add_method('GetOutputDevice',
'ns3::Ptr< ns3::NetDevice >',
[],
is_const=True)
## ipv4-route.h (module 'internet'): ns3::Ipv4Address ns3::Ipv4Route::GetSource() const [member function]
cls.add_method('GetSource',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetDestination(ns3::Ipv4Address dest) [member function]
cls.add_method('SetDestination',
'void',
[param('ns3::Ipv4Address', 'dest')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetGateway(ns3::Ipv4Address gw) [member function]
cls.add_method('SetGateway',
'void',
[param('ns3::Ipv4Address', 'gw')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetOutputDevice(ns3::Ptr<ns3::NetDevice> outputDevice) [member function]
cls.add_method('SetOutputDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'outputDevice')])
## ipv4-route.h (module 'internet'): void ns3::Ipv4Route::SetSource(ns3::Ipv4Address src) [member function]
cls.add_method('SetSource',
'void',
[param('ns3::Ipv4Address', 'src')])
return
def register_Ns3Ipv4RoutingProtocol_methods(root_module, cls):
## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol() [constructor]
cls.add_constructor([])
## ipv4-routing-protocol.h (module 'internet'): ns3::Ipv4RoutingProtocol::Ipv4RoutingProtocol(ns3::Ipv4RoutingProtocol const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4RoutingProtocol const &', 'arg0')])
## ipv4-routing-protocol.h (module 'internet'): static ns3::TypeId ns3::Ipv4RoutingProtocol::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyAddAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceDown',
'void',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceUp',
'void',
[param('uint32_t', 'interface')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyRemoveAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::Unit::S) const [member function]
cls.add_method('PrintRoutingTable',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): bool ns3::Ipv4RoutingProtocol::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Ipv4RoutingProtocol::UnicastForwardCallback ucb, ns3::Ipv4RoutingProtocol::MulticastForwardCallback mcb, ns3::Ipv4RoutingProtocol::LocalDeliverCallback lcb, ns3::Ipv4RoutingProtocol::ErrorCallback ecb) [member function]
cls.add_method('RouteInput',
'bool',
[param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
cls.add_method('RouteOutput',
'ns3::Ptr< ns3::Ipv4Route >',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')],
is_pure_virtual=True, is_virtual=True)
## ipv4-routing-protocol.h (module 'internet'): void ns3::Ipv4RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
cls.add_method('SetIpv4',
'void',
[param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Ipv4StaticRouting_methods(root_module, cls):
## ipv4-static-routing.h (module 'internet'): ns3::Ipv4StaticRouting::Ipv4StaticRouting(ns3::Ipv4StaticRouting const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4StaticRouting const &', 'arg0')])
## ipv4-static-routing.h (module 'internet'): ns3::Ipv4StaticRouting::Ipv4StaticRouting() [constructor]
cls.add_constructor([])
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::AddHostRouteTo(ns3::Ipv4Address dest, ns3::Ipv4Address nextHop, uint32_t interface, uint32_t metric=0) [member function]
cls.add_method('AddHostRouteTo',
'void',
[param('ns3::Ipv4Address', 'dest'), param('ns3::Ipv4Address', 'nextHop'), param('uint32_t', 'interface'), param('uint32_t', 'metric', default_value='0')])
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::AddHostRouteTo(ns3::Ipv4Address dest, uint32_t interface, uint32_t metric=0) [member function]
cls.add_method('AddHostRouteTo',
'void',
[param('ns3::Ipv4Address', 'dest'), param('uint32_t', 'interface'), param('uint32_t', 'metric', default_value='0')])
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::AddMulticastRoute(ns3::Ipv4Address origin, ns3::Ipv4Address group, uint32_t inputInterface, std::vector<unsigned int, std::allocator<unsigned int> > outputInterfaces) [member function]
cls.add_method('AddMulticastRoute',
'void',
[param('ns3::Ipv4Address', 'origin'), param('ns3::Ipv4Address', 'group'), param('uint32_t', 'inputInterface'), param('std::vector< unsigned int >', 'outputInterfaces')])
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::AddNetworkRouteTo(ns3::Ipv4Address network, ns3::Ipv4Mask networkMask, ns3::Ipv4Address nextHop, uint32_t interface, uint32_t metric=0) [member function]
cls.add_method('AddNetworkRouteTo',
'void',
[param('ns3::Ipv4Address', 'network'), param('ns3::Ipv4Mask', 'networkMask'), param('ns3::Ipv4Address', 'nextHop'), param('uint32_t', 'interface'), param('uint32_t', 'metric', default_value='0')])
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::AddNetworkRouteTo(ns3::Ipv4Address network, ns3::Ipv4Mask networkMask, uint32_t interface, uint32_t metric=0) [member function]
cls.add_method('AddNetworkRouteTo',
'void',
[param('ns3::Ipv4Address', 'network'), param('ns3::Ipv4Mask', 'networkMask'), param('uint32_t', 'interface'), param('uint32_t', 'metric', default_value='0')])
## ipv4-static-routing.h (module 'internet'): ns3::Ipv4RoutingTableEntry ns3::Ipv4StaticRouting::GetDefaultRoute() [member function]
cls.add_method('GetDefaultRoute',
'ns3::Ipv4RoutingTableEntry',
[])
## ipv4-static-routing.h (module 'internet'): uint32_t ns3::Ipv4StaticRouting::GetMetric(uint32_t index) const [member function]
cls.add_method('GetMetric',
'uint32_t',
[param('uint32_t', 'index')],
is_const=True)
## ipv4-static-routing.h (module 'internet'): ns3::Ipv4MulticastRoutingTableEntry ns3::Ipv4StaticRouting::GetMulticastRoute(uint32_t i) const [member function]
cls.add_method('GetMulticastRoute',
'ns3::Ipv4MulticastRoutingTableEntry',
[param('uint32_t', 'i')],
is_const=True)
## ipv4-static-routing.h (module 'internet'): uint32_t ns3::Ipv4StaticRouting::GetNMulticastRoutes() const [member function]
cls.add_method('GetNMulticastRoutes',
'uint32_t',
[],
is_const=True)
## ipv4-static-routing.h (module 'internet'): uint32_t ns3::Ipv4StaticRouting::GetNRoutes() const [member function]
cls.add_method('GetNRoutes',
'uint32_t',
[],
is_const=True)
## ipv4-static-routing.h (module 'internet'): ns3::Ipv4RoutingTableEntry ns3::Ipv4StaticRouting::GetRoute(uint32_t i) const [member function]
cls.add_method('GetRoute',
'ns3::Ipv4RoutingTableEntry',
[param('uint32_t', 'i')],
is_const=True)
## ipv4-static-routing.h (module 'internet'): static ns3::TypeId ns3::Ipv4StaticRouting::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyAddAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_virtual=True)
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::NotifyInterfaceDown(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceDown',
'void',
[param('uint32_t', 'interface')],
is_virtual=True)
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::NotifyInterfaceUp(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceUp',
'void',
[param('uint32_t', 'interface')],
is_virtual=True)
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyRemoveAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_virtual=True)
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::Unit::S) const [member function]
cls.add_method('PrintRoutingTable',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')],
is_const=True, is_virtual=True)
## ipv4-static-routing.h (module 'internet'): bool ns3::Ipv4StaticRouting::RemoveMulticastRoute(ns3::Ipv4Address origin, ns3::Ipv4Address group, uint32_t inputInterface) [member function]
cls.add_method('RemoveMulticastRoute',
'bool',
[param('ns3::Ipv4Address', 'origin'), param('ns3::Ipv4Address', 'group'), param('uint32_t', 'inputInterface')])
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::RemoveMulticastRoute(uint32_t index) [member function]
cls.add_method('RemoveMulticastRoute',
'void',
[param('uint32_t', 'index')])
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::RemoveRoute(uint32_t i) [member function]
cls.add_method('RemoveRoute',
'void',
[param('uint32_t', 'i')])
## ipv4-static-routing.h (module 'internet'): bool ns3::Ipv4StaticRouting::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Ipv4RoutingProtocol::UnicastForwardCallback ucb, ns3::Ipv4RoutingProtocol::MulticastForwardCallback mcb, ns3::Ipv4RoutingProtocol::LocalDeliverCallback lcb, ns3::Ipv4RoutingProtocol::ErrorCallback ecb) [member function]
cls.add_method('RouteInput',
'bool',
[param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
is_virtual=True)
## ipv4-static-routing.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4StaticRouting::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
cls.add_method('RouteOutput',
'ns3::Ptr< ns3::Ipv4Route >',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')],
is_virtual=True)
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::SetDefaultMulticastRoute(uint32_t outputInterface) [member function]
cls.add_method('SetDefaultMulticastRoute',
'void',
[param('uint32_t', 'outputInterface')])
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::SetDefaultRoute(ns3::Ipv4Address nextHop, uint32_t interface, uint32_t metric=0) [member function]
cls.add_method('SetDefaultRoute',
'void',
[param('ns3::Ipv4Address', 'nextHop'), param('uint32_t', 'interface'), param('uint32_t', 'metric', default_value='0')])
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
cls.add_method('SetIpv4',
'void',
[param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')],
is_virtual=True)
## ipv4-static-routing.h (module 'internet'): void ns3::Ipv4StaticRouting::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
return
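# Illustrative sketch (not generated code): adding routes through the Ipv4StaticRouting
# methods registered above; assumes the built bindings are importable as `ns.network`,
# and that `static_routing` is an ns3::Ipv4StaticRouting instance already attached to a
# node's Ipv4 stack. Interface indices and addresses are hypothetical. Never called here.
def _example_ipv4_static_routing_usage(static_routing):
    import ns.network
    # Route 10.1.2.0/24 via next hop 10.1.1.2 out of interface 1.
    static_routing.AddNetworkRouteTo(ns.network.Ipv4Address("10.1.2.0"),
                                     ns.network.Ipv4Mask("255.255.255.0"),
                                     ns.network.Ipv4Address("10.1.1.2"), 1)
    # Fall back to 10.1.1.1 on interface 1 for destinations without a more specific route.
    static_routing.SetDefaultRoute(ns.network.Ipv4Address("10.1.1.1"), 1)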
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
cls.add_method('GetMu',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
cls.add_method('GetSigma',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mu'), param('double', 'sigma')])
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::NetDevice::PromiscReceiveCallback cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::NetDevice::ReceiveCallback cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
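# Illustrative sketch (not part of the generated registrations): once the ns3::NetDevice
# bindings above are compiled into the monolithic ns-3 Python module, the abstract
# NetDevice interface can be queried from Python through a concrete device installed on a
# node. The 'node' argument is assumed to be an ns.network.Node with at least one device;
# nothing below is emitted by the binding generator itself.
def _example_net_device_queries(node):
    """Query a node's first installed device via the NetDevice interface (sketch)."""
    dev = node.GetDevice(0)            # ns3::Ptr<ns3::NetDevice> to a concrete device
    link_up = dev.IsLinkUp()           # pure virtual in C++, implemented by the device
    broadcast = dev.IsBroadcast()      # does the device support broadcast addressing?
    needs_arp = dev.NeedsArp()         # whether upper layers must resolve addresses
    owner = dev.GetNode()              # back-pointer to the owning ns3::Node
    return link_up, broadcast, needs_arp, owner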
def register_Ns3NixVector_methods(root_module, cls):
cls.add_output_stream_operator()
## nix-vector.h (module 'network'): ns3::NixVector::NixVector() [constructor]
cls.add_constructor([])
## nix-vector.h (module 'network'): ns3::NixVector::NixVector(ns3::NixVector const & o) [constructor]
cls.add_constructor([param('ns3::NixVector const &', 'o')])
## nix-vector.h (module 'network'): void ns3::NixVector::AddNeighborIndex(uint32_t newBits, uint32_t numberOfBits) [member function]
cls.add_method('AddNeighborIndex',
'void',
[param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::BitCount(uint32_t numberOfNeighbors) const [member function]
cls.add_method('BitCount',
'uint32_t',
[param('uint32_t', 'numberOfNeighbors')],
is_const=True)
## nix-vector.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::NixVector::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Deserialize(uint32_t const * buffer, uint32_t size) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::ExtractNeighborIndex(uint32_t numberOfBits) [member function]
cls.add_method('ExtractNeighborIndex',
'uint32_t',
[param('uint32_t', 'numberOfBits')])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetRemainingBits() [member function]
cls.add_method('GetRemainingBits',
'uint32_t',
[])
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## nix-vector.h (module 'network'): uint32_t ns3::NixVector::Serialize(uint32_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
return
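# Illustrative sketch (not part of the generated registrations): the ns3::NixVector
# bindings above expose the bit-packed neighbor-index container used by nix-vector
# routing. The 'ns.network' import path is an assumption about the standard monolithic
# ns-3 Python bindings being installed.
def _example_nix_vector_roundtrip():
    """Push one neighbor index into a NixVector and read it back (sketch)."""
    import ns.network  # assumption: monolithic ns-3 Python bindings are available
    nv = ns.network.NixVector()
    bits = nv.BitCount(5)                  # bits needed to address one of 5 neighbors (3)
    nv.AddNeighborIndex(3, bits)           # append index 3 using that many bits
    return nv.ExtractNeighborIndex(bits)   # -> 3, consuming the bits just added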
def register_Ns3Node_methods(root_module, cls):
## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [constructor]
cls.add_constructor([param('ns3::Node const &', 'arg0')])
## node.h (module 'network'): ns3::Node::Node() [constructor]
cls.add_constructor([])
## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
cls.add_constructor([param('uint32_t', 'systemId')])
## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
cls.add_method('AddApplication',
'uint32_t',
[param('ns3::Ptr< ns3::Application >', 'application')])
## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('AddDevice',
'uint32_t',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
cls.add_method('ChecksumEnabled',
'bool',
[],
is_static=True)
## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
cls.add_method('GetApplication',
'ns3::Ptr< ns3::Application >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'index')],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function]
cls.add_method('GetLocalTime',
'ns3::Time',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
cls.add_method('GetNApplications',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True)
## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Node::DeviceAdditionListener listener) [member function]
cls.add_method('RegisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Node::ProtocolHandler handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
cls.add_method('RegisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Node::DeviceAdditionListener listener) [member function]
cls.add_method('UnregisterDeviceAdditionListener',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Node::ProtocolHandler handler) [member function]
cls.add_method('UnregisterProtocolHandler',
'void',
[param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
return
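# Illustrative sketch (not part of the generated registrations): the ns3::Node bindings
# above give Python access to a node's identifiers and its device/application containers.
# The 'ns.network' import path is assumed from the standard ns-3 Python bindings.
def _example_node_inventory():
    """Create a bare Node and inspect its id and (empty) containers (sketch)."""
    import ns.network  # assumption
    node = ns.network.Node()
    return node.GetId(), node.GetNDevices(), node.GetNApplications()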
def register_Ns3NormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
cls.add_method('GetVariance',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
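# Illustrative sketch (not part of the generated registrations): the
# ns3::NormalRandomVariable bindings above can be driven either through the attribute
# system, as below, or through the explicit GetValue(mean, variance, bound) overload.
# The 'ns.core' import path and DoubleValue wrapper are assumed from the standard
# ns-3 Python bindings.
def _example_normal_random_variable():
    """Draw one value from N(mean=5, variance=2) via attributes (sketch)."""
    import ns.core  # assumption
    rv = ns.core.NormalRandomVariable()
    rv.SetAttribute("Mean", ns.core.DoubleValue(5.0))
    rv.SetAttribute("Variance", ns.core.DoubleValue(2.0))
    return rv.GetValue()                   # stream-backed draw using the attributes above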
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(ns3::OutputStreamWrapper const & arg0) [constructor]
cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::string filename, std::ios_base::openmode filemode) [constructor]
cls.add_constructor([param('std::string', 'filename'), param('std::ios_base::openmode', 'filemode')])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper::OutputStreamWrapper(std::ostream * os) [constructor]
cls.add_constructor([param('std::ostream *', 'os')])
## output-stream-wrapper.h (module 'network'): std::ostream * ns3::OutputStreamWrapper::GetStream() [member function]
cls.add_method('GetStream',
'std::ostream *',
[])
return
def register_Ns3Packet_methods(root_module, cls):
cls.add_output_stream_operator()
## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
cls.add_constructor([])
## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [constructor]
cls.add_constructor([param('ns3::Packet const &', 'o')])
## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
cls.add_constructor([param('uint32_t', 'size')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
cls.add_method('AddAtEnd',
'void',
[param('ns3::Ptr< ns3::Packet const >', 'packet')])
## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddByteTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
cls.add_method('AddHeader',
'void',
[param('ns3::Header const &', 'header')])
## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
cls.add_method('AddPacketTag',
'void',
[param('ns3::Tag const &', 'tag')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
cls.add_method('AddPaddingAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
cls.add_method('AddTrailer',
'void',
[param('ns3::Trailer const &', 'trailer')])
## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
cls.add_method('BeginItem',
'ns3::PacketMetadata::ItemIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::Packet >',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
cls.add_method('CopyData',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
cls.add_method('CopyData',
'void',
[param('std::ostream *', 'os'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
cls.add_method('CreateFragment',
'ns3::Ptr< ns3::Packet >',
[param('uint32_t', 'start'), param('uint32_t', 'length')],
is_const=True)
## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
cls.add_method('EnableChecking',
'void',
[],
is_static=True)
## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
cls.add_method('EnablePrinting',
'void',
[],
is_static=True)
## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
cls.add_method('FindFirstMatchingByteTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
cls.add_method('GetByteTagIterator',
'ns3::ByteTagIterator',
[],
is_const=True)
## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
cls.add_method('GetNixVector',
'ns3::Ptr< ns3::NixVector >',
[],
is_const=True)
## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
cls.add_method('GetPacketTagIterator',
'ns3::PacketTagIterator',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
cls.add_method('GetSize',
'uint32_t',
[],
is_const=True)
## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
cls.add_method('GetUid',
'uint64_t',
[],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header, uint32_t size) const [member function]
cls.add_method('PeekHeader',
'uint32_t',
[param('ns3::Header &', 'header'), param('uint32_t', 'size')],
is_const=True)
## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
cls.add_method('PeekPacketTag',
'bool',
[param('ns3::Tag &', 'tag')],
is_const=True)
## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('PeekTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
cls.add_method('PrintByteTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
cls.add_method('PrintPacketTags',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
cls.add_method('RemoveAllByteTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
cls.add_method('RemoveAllPacketTags',
'void',
[])
## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
cls.add_method('RemoveAtEnd',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
cls.add_method('RemoveAtStart',
'void',
[param('uint32_t', 'size')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header, uint32_t size) [member function]
cls.add_method('RemoveHeader',
'uint32_t',
[param('ns3::Header &', 'header'), param('uint32_t', 'size')])
## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
cls.add_method('RemovePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
cls.add_method('RemoveTrailer',
'uint32_t',
[param('ns3::Trailer &', 'trailer')])
## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
cls.add_method('ReplacePacketTag',
'bool',
[param('ns3::Tag &', 'tag')])
## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
cls.add_method('Serialize',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
is_const=True)
## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
cls.add_method('SetNixVector',
'void',
[param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
cls.add_method('ToString',
'std::string',
[],
is_const=True)
return
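# Illustrative sketch (not part of the generated registrations): the ns3::Packet bindings
# above cover construction, copying and fragmentation. The 'ns.network' import path is an
# assumption about the standard ns-3 Python bindings.
def _example_packet_basics():
    """Build a dummy 64-byte packet, copy it and take a fragment (sketch)."""
    import ns.network  # assumption
    pkt = ns.network.Packet(64)            # Packet(uint32_t size): zero-filled payload
    clone = pkt.Copy()                     # independent copy (copy-on-write underneath)
    frag = pkt.CreateFragment(0, 32)       # first 32 bytes as a new packet
    return pkt.GetSize(), clone.GetUid(), frag.GetSize()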
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
deprecated=True, is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
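# Illustrative sketch (not part of the generated registrations): TimeValue is the
# AttributeValue wrapper used when an ns3::Time travels through the attribute system.
# 'ns.core.Seconds' is assumed from the standard ns-3 Python bindings.
def _example_time_value_roundtrip():
    """Wrap one second in a TimeValue and unwrap it again (sketch)."""
    import ns.core  # assumption
    tv = ns.core.TimeValue(ns.core.Seconds(1.0))
    return tv.Get()                        # -> ns3::Time of 1 s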
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3CallbackImpl__Bool_Ns3Ptr__lt__ns3Socket__gt___Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< bool, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImpl<bool, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::Socket> arg0, ns3::Address const & arg1) [member operator]
cls.add_method('operator()',
'bool',
[param('ns3::Ptr< ns3::Socket >', 'arg0'), param('ns3::Address const &', 'arg1')],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
def register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): ns3::ObjectBase * ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()() [member operator]
cls.add_method('operator()',
'ns3::ObjectBase *',
[],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
def register_Ns3CallbackImpl__Void_Const_ns3OlsrPacketHeader___amp___Const_stdVector__lt__ns3OlsrMessageHeader__stdAllocator__lt__ns3OlsrMessageHeader__gt_____gt_____amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, const ns3::olsr::PacketHeader &, const std::vector<ns3::olsr::MessageHeader, std::allocator<ns3::olsr::MessageHeader> > &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, const ns3::olsr::PacketHeader &, const std::vector<ns3::olsr::MessageHeader, std::allocator<ns3::olsr::MessageHeader> > &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, const ns3::olsr::PacketHeader &, const std::vector<ns3::olsr::MessageHeader, std::allocator<ns3::olsr::MessageHeader> > &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackImpl< void, ns3::olsr::PacketHeader const &, std::vector< ns3::olsr::MessageHeader, std::allocator< ns3::olsr::MessageHeader > > const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, const ns3::olsr::PacketHeader &, const std::vector<ns3::olsr::MessageHeader, std::allocator<ns3::olsr::MessageHeader> > &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, const ns3::olsr::PacketHeader &, const std::vector<ns3::olsr::MessageHeader, std::allocator<ns3::olsr::MessageHeader> > &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, const ns3::olsr::PacketHeader &, const std::vector<ns3::olsr::MessageHeader, std::allocator<ns3::olsr::MessageHeader> > &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::olsr::PacketHeader const & arg0, std::vector<ns3::olsr::MessageHeader, std::allocator<ns3::olsr::MessageHeader> > const & arg1) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::olsr::PacketHeader const &', 'arg0'), param('std::vector< ns3::olsr::MessageHeader > const &', 'arg1')],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
cls.add_copy_constructor()
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Ptr__lt__const_ns3Packet__gt___Unsigned_short_Const_ns3Address___amp___Const_ns3Address___amp___Ns3NetDevicePacketType_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<const ns3::Packet>, unsigned short, const ns3::Address &, const ns3::Address &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::NetDevice> arg0, ns3::Ptr<const ns3::Packet> arg1, short unsigned int arg2, ns3::Address const & arg3, ns3::Address const & arg4, ns3::NetDevice::PacketType arg5) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'arg0'), param('ns3::Ptr< ns3::Packet const >', 'arg1'), param('short unsigned int', 'arg2'), param('ns3::Address const &', 'arg3'), param('ns3::Address const &', 'arg4'), param('ns3::NetDevice::PacketType', 'arg5')],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3NetDevice__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<ns3::NetDevice>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::NetDevice> arg0) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'arg0')],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Const_ns3Address___amp___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Socket >, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, const ns3::Address &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::Socket> arg0, ns3::Address const & arg1) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::Socket >', 'arg0'), param('ns3::Address const &', 'arg1')],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Socket >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::Socket> arg0) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::Socket >', 'arg0')],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__ns3Socket__gt___Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::Socket >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<ns3::Socket>, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<ns3::Socket> arg0, unsigned int arg1) [member operator]
cls.add_method('operator()',
'void',
[param('ns3::Ptr< ns3::Socket >', 'arg0'), param('unsigned int', 'arg1')],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
def register_Ns3CallbackImpl__Void_Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
cls.add_constructor([param('ns3::CallbackImpl< void, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
cls.add_method('DoGetTypeid',
'std::string',
[],
is_static=True)
## callback.h (module 'core'): std::string ns3::CallbackImpl<void, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackImpl<void, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(unsigned int arg0) [member operator]
cls.add_method('operator()',
'void',
[param('unsigned int', 'arg0')],
is_pure_virtual=True, is_virtual=True, custom_name=u'__call__')
return
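# Illustrative sketch (not part of the generated registrations): the CallbackImpl
# specializations registered above are the glue that lets a plain Python callable be
# passed wherever ns-3 expects the corresponding ns3::Callback, e.g. the
# <void, Ptr<Socket>> form taken by Socket::SetRecvCallback. The import paths, the
# "ns3::UdpSocketFactory" TypeId name and the requirement that 'node' already has an
# internet stack installed are assumptions, not guarantees from this file.
def _example_socket_recv_callback(node):
    """Attach a Python receive handler to a UDP socket on 'node' (sketch)."""
    import ns.core
    import ns.network  # assumption: monolithic ns-3 Python bindings
    tid = ns.core.TypeId.LookupByName("ns3::UdpSocketFactory")
    sock = ns.network.Socket.CreateSocket(node, tid)
    def on_recv(socket):                   # matches CallbackImpl<void, Ptr<Socket>, empty, ...>
        pkt = socket.Recv()
        print("received", pkt.GetSize(), "bytes")
    sock.SetRecvCallback(on_recv)
    return sock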
def register_Ns3Ipv4ListRouting_methods(root_module, cls):
## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting::Ipv4ListRouting(ns3::Ipv4ListRouting const & arg0) [constructor]
cls.add_constructor([param('ns3::Ipv4ListRouting const &', 'arg0')])
## ipv4-list-routing.h (module 'internet'): ns3::Ipv4ListRouting::Ipv4ListRouting() [constructor]
cls.add_constructor([])
## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::AddRoutingProtocol(ns3::Ptr<ns3::Ipv4RoutingProtocol> routingProtocol, int16_t priority) [member function]
cls.add_method('AddRoutingProtocol',
'void',
[param('ns3::Ptr< ns3::Ipv4RoutingProtocol >', 'routingProtocol'), param('int16_t', 'priority')],
is_virtual=True)
## ipv4-list-routing.h (module 'internet'): uint32_t ns3::Ipv4ListRouting::GetNRoutingProtocols() const [member function]
cls.add_method('GetNRoutingProtocols',
'uint32_t',
[],
is_const=True, is_virtual=True)
## ipv4-list-routing.h (module 'internet'): ns3::Ptr<ns3::Ipv4RoutingProtocol> ns3::Ipv4ListRouting::GetRoutingProtocol(uint32_t index, int16_t & priority) const [member function]
cls.add_method('GetRoutingProtocol',
'ns3::Ptr< ns3::Ipv4RoutingProtocol >',
[param('uint32_t', 'index'), param('int16_t &', 'priority', direction=2)],
is_const=True, is_virtual=True)
## ipv4-list-routing.h (module 'internet'): static ns3::TypeId ns3::Ipv4ListRouting::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyAddAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_virtual=True)
## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyInterfaceDown(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceDown',
'void',
[param('uint32_t', 'interface')],
is_virtual=True)
## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyInterfaceUp(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceUp',
'void',
[param('uint32_t', 'interface')],
is_virtual=True)
## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyRemoveAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
is_virtual=True)
## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::Unit::S) const [member function]
cls.add_method('PrintRoutingTable',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')],
is_const=True, is_virtual=True)
## ipv4-list-routing.h (module 'internet'): bool ns3::Ipv4ListRouting::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Ipv4RoutingProtocol::UnicastForwardCallback ucb, ns3::Ipv4RoutingProtocol::MulticastForwardCallback mcb, ns3::Ipv4RoutingProtocol::LocalDeliverCallback lcb, ns3::Ipv4RoutingProtocol::ErrorCallback ecb) [member function]
cls.add_method('RouteInput',
'bool',
[param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
is_virtual=True)
## ipv4-list-routing.h (module 'internet'): ns3::Ptr<ns3::Ipv4Route> ns3::Ipv4ListRouting::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
cls.add_method('RouteOutput',
'ns3::Ptr< ns3::Ipv4Route >',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')],
is_virtual=True)
## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
cls.add_method('SetIpv4',
'void',
[param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')],
is_virtual=True)
## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## ipv4-list-routing.h (module 'internet'): void ns3::Ipv4ListRouting::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
return
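# Illustrative sketch (not part of the generated registrations): Ipv4ListRouting, as
# registered above, chains several IPv4 routing protocols and consults them in priority
# order. The concrete protocols used here (ns.internet.Ipv4StaticRouting and
# ns.olsr.RoutingProtocol) and their import paths are assumptions about a standard ns-3
# installation with the internet and olsr modules built.
def _example_list_routing_priorities():
    """Compose static routing (priority 0) with OLSR (priority 10) (sketch)."""
    import ns.internet
    import ns.olsr  # assumption
    lr = ns.internet.Ipv4ListRouting()
    lr.AddRoutingProtocol(ns.internet.Ipv4StaticRouting(), 0)
    lr.AddRoutingProtocol(ns.olsr.RoutingProtocol(), 10)
    return lr.GetNRoutingProtocols()       # -> 2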
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, std::size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('std::size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3OlsrAssociation_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_output_stream_operator()
## olsr-repositories.h (module 'olsr'): ns3::olsr::Association::Association() [constructor]
cls.add_constructor([])
## olsr-repositories.h (module 'olsr'): ns3::olsr::Association::Association(ns3::olsr::Association const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::Association const &', 'arg0')])
## olsr-repositories.h (module 'olsr'): ns3::olsr::Association::netmask [variable]
cls.add_instance_attribute('netmask', 'ns3::Ipv4Mask', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::Association::networkAddr [variable]
cls.add_instance_attribute('networkAddr', 'ns3::Ipv4Address', is_const=False)
return
def register_Ns3OlsrAssociationTuple_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_output_stream_operator()
## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::AssociationTuple() [constructor]
cls.add_constructor([])
## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::AssociationTuple(ns3::olsr::AssociationTuple const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::AssociationTuple const &', 'arg0')])
## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::expirationTime [variable]
cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::gatewayAddr [variable]
cls.add_instance_attribute('gatewayAddr', 'ns3::Ipv4Address', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::netmask [variable]
cls.add_instance_attribute('netmask', 'ns3::Ipv4Mask', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::AssociationTuple::networkAddr [variable]
cls.add_instance_attribute('networkAddr', 'ns3::Ipv4Address', is_const=False)
return
def register_Ns3OlsrDuplicateTuple_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::DuplicateTuple() [constructor]
cls.add_constructor([])
## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::DuplicateTuple(ns3::olsr::DuplicateTuple const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::DuplicateTuple const &', 'arg0')])
## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::address [variable]
cls.add_instance_attribute('address', 'ns3::Ipv4Address', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::expirationTime [variable]
cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::ifaceList [variable]
cls.add_instance_attribute('ifaceList', 'std::vector< ns3::Ipv4Address >', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::retransmitted [variable]
cls.add_instance_attribute('retransmitted', 'bool', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::DuplicateTuple::sequenceNumber [variable]
cls.add_instance_attribute('sequenceNumber', 'uint16_t', is_const=False)
return
def register_Ns3OlsrIfaceAssocTuple_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_output_stream_operator()
## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple::IfaceAssocTuple() [constructor]
cls.add_constructor([])
## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple::IfaceAssocTuple(ns3::olsr::IfaceAssocTuple const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::IfaceAssocTuple const &', 'arg0')])
## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple::ifaceAddr [variable]
cls.add_instance_attribute('ifaceAddr', 'ns3::Ipv4Address', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple::mainAddr [variable]
cls.add_instance_attribute('mainAddr', 'ns3::Ipv4Address', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::IfaceAssocTuple::time [variable]
cls.add_instance_attribute('time', 'ns3::Time', is_const=False)
return
def register_Ns3OlsrLinkTuple_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_output_stream_operator()
## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::LinkTuple() [constructor]
cls.add_constructor([])
## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::LinkTuple(ns3::olsr::LinkTuple const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::LinkTuple const &', 'arg0')])
## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::asymTime [variable]
cls.add_instance_attribute('asymTime', 'ns3::Time', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::localIfaceAddr [variable]
cls.add_instance_attribute('localIfaceAddr', 'ns3::Ipv4Address', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::neighborIfaceAddr [variable]
cls.add_instance_attribute('neighborIfaceAddr', 'ns3::Ipv4Address', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::symTime [variable]
cls.add_instance_attribute('symTime', 'ns3::Time', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::LinkTuple::time [variable]
cls.add_instance_attribute('time', 'ns3::Time', is_const=False)
return
def register_Ns3OlsrMessageHeader_methods(root_module, cls):
cls.add_output_stream_operator()
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::MessageHeader(ns3::olsr::MessageHeader const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::MessageHeader const &', 'arg0')])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::MessageHeader() [constructor]
cls.add_constructor([])
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello & ns3::olsr::MessageHeader::GetHello() [member function]
cls.add_method('GetHello',
'ns3::olsr::MessageHeader::Hello &',
[])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello const & ns3::olsr::MessageHeader::GetHello() const [member function]
cls.add_method('GetHello',
'ns3::olsr::MessageHeader::Hello const &',
[],
is_const=True)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna & ns3::olsr::MessageHeader::GetHna() [member function]
cls.add_method('GetHna',
'ns3::olsr::MessageHeader::Hna &',
[])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna const & ns3::olsr::MessageHeader::GetHna() const [member function]
cls.add_method('GetHna',
'ns3::olsr::MessageHeader::Hna const &',
[],
is_const=True)
## olsr-header.h (module 'olsr'): uint8_t ns3::olsr::MessageHeader::GetHopCount() const [member function]
cls.add_method('GetHopCount',
'uint8_t',
[],
is_const=True)
## olsr-header.h (module 'olsr'): ns3::TypeId ns3::olsr::MessageHeader::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## olsr-header.h (module 'olsr'): uint16_t ns3::olsr::MessageHeader::GetMessageSequenceNumber() const [member function]
cls.add_method('GetMessageSequenceNumber',
'uint16_t',
[],
is_const=True)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::MessageType ns3::olsr::MessageHeader::GetMessageType() const [member function]
cls.add_method('GetMessageType',
'ns3::olsr::MessageHeader::MessageType',
[],
is_const=True)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid & ns3::olsr::MessageHeader::GetMid() [member function]
cls.add_method('GetMid',
'ns3::olsr::MessageHeader::Mid &',
[])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid const & ns3::olsr::MessageHeader::GetMid() const [member function]
cls.add_method('GetMid',
'ns3::olsr::MessageHeader::Mid const &',
[],
is_const=True)
## olsr-header.h (module 'olsr'): ns3::Ipv4Address ns3::olsr::MessageHeader::GetOriginatorAddress() const [member function]
cls.add_method('GetOriginatorAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc & ns3::olsr::MessageHeader::GetTc() [member function]
cls.add_method('GetTc',
'ns3::olsr::MessageHeader::Tc &',
[])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc const & ns3::olsr::MessageHeader::GetTc() const [member function]
cls.add_method('GetTc',
'ns3::olsr::MessageHeader::Tc const &',
[],
is_const=True)
## olsr-header.h (module 'olsr'): uint8_t ns3::olsr::MessageHeader::GetTimeToLive() const [member function]
cls.add_method('GetTimeToLive',
'uint8_t',
[],
is_const=True)
## olsr-header.h (module 'olsr'): static ns3::TypeId ns3::olsr::MessageHeader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## olsr-header.h (module 'olsr'): ns3::Time ns3::olsr::MessageHeader::GetVTime() const [member function]
cls.add_method('GetVTime',
'ns3::Time',
[],
is_const=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetHopCount(uint8_t hopCount) [member function]
cls.add_method('SetHopCount',
'void',
[param('uint8_t', 'hopCount')])
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetMessageSequenceNumber(uint16_t messageSequenceNumber) [member function]
cls.add_method('SetMessageSequenceNumber',
'void',
[param('uint16_t', 'messageSequenceNumber')])
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetMessageType(ns3::olsr::MessageHeader::MessageType messageType) [member function]
cls.add_method('SetMessageType',
'void',
[param('ns3::olsr::MessageHeader::MessageType', 'messageType')])
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetOriginatorAddress(ns3::Ipv4Address originatorAddress) [member function]
cls.add_method('SetOriginatorAddress',
'void',
[param('ns3::Ipv4Address', 'originatorAddress')])
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetTimeToLive(uint8_t timeToLive) [member function]
cls.add_method('SetTimeToLive',
'void',
[param('uint8_t', 'timeToLive')])
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::SetVTime(ns3::Time time) [member function]
cls.add_method('SetVTime',
'void',
[param('ns3::Time', 'time')])
return
def register_Ns3OlsrMessageHeaderHello_methods(root_module, cls):
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::Hello() [constructor]
cls.add_constructor([])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::Hello(ns3::olsr::MessageHeader::Hello const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::MessageHeader::Hello const &', 'arg0')])
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Hello::Deserialize(ns3::Buffer::Iterator start, uint32_t messageSize) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'messageSize')])
## olsr-header.h (module 'olsr'): ns3::Time ns3::olsr::MessageHeader::Hello::GetHTime() const [member function]
cls.add_method('GetHTime',
'ns3::Time',
[],
is_const=True)
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Hello::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Hello::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Hello::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Hello::SetHTime(ns3::Time time) [member function]
cls.add_method('SetHTime',
'void',
[param('ns3::Time', 'time')])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::hTime [variable]
cls.add_instance_attribute('hTime', 'uint8_t', is_const=False)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::linkMessages [variable]
cls.add_instance_attribute('linkMessages', 'std::vector< ns3::olsr::MessageHeader::Hello::LinkMessage >', is_const=False)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::willingness [variable]
cls.add_instance_attribute('willingness', 'uint8_t', is_const=False)
return
def register_Ns3OlsrMessageHeaderHelloLinkMessage_methods(root_module, cls):
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::LinkMessage::LinkMessage() [constructor]
cls.add_constructor([])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::LinkMessage::LinkMessage(ns3::olsr::MessageHeader::Hello::LinkMessage const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::MessageHeader::Hello::LinkMessage const &', 'arg0')])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::LinkMessage::linkCode [variable]
cls.add_instance_attribute('linkCode', 'uint8_t', is_const=False)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hello::LinkMessage::neighborInterfaceAddresses [variable]
cls.add_instance_attribute('neighborInterfaceAddresses', 'std::vector< ns3::Ipv4Address >', is_const=False)
return
def register_Ns3OlsrMessageHeaderHna_methods(root_module, cls):
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Hna() [constructor]
cls.add_constructor([])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Hna(ns3::olsr::MessageHeader::Hna const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::MessageHeader::Hna const &', 'arg0')])
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Hna::Deserialize(ns3::Buffer::Iterator start, uint32_t messageSize) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'messageSize')])
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Hna::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Hna::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Hna::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::associations [variable]
cls.add_instance_attribute('associations', 'std::vector< ns3::olsr::MessageHeader::Hna::Association >', is_const=False)
return
def register_Ns3OlsrMessageHeaderHnaAssociation_methods(root_module, cls):
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Association::Association() [constructor]
cls.add_constructor([])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Association::Association(ns3::olsr::MessageHeader::Hna::Association const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::MessageHeader::Hna::Association const &', 'arg0')])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Association::address [variable]
cls.add_instance_attribute('address', 'ns3::Ipv4Address', is_const=False)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Hna::Association::mask [variable]
cls.add_instance_attribute('mask', 'ns3::Ipv4Mask', is_const=False)
return
def register_Ns3OlsrMessageHeaderMid_methods(root_module, cls):
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid::Mid() [constructor]
cls.add_constructor([])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid::Mid(ns3::olsr::MessageHeader::Mid const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::MessageHeader::Mid const &', 'arg0')])
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Mid::Deserialize(ns3::Buffer::Iterator start, uint32_t messageSize) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'messageSize')])
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Mid::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Mid::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Mid::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Mid::interfaceAddresses [variable]
cls.add_instance_attribute('interfaceAddresses', 'std::vector< ns3::Ipv4Address >', is_const=False)
return
def register_Ns3OlsrMessageHeaderTc_methods(root_module, cls):
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc::Tc() [constructor]
cls.add_constructor([])
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc::Tc(ns3::olsr::MessageHeader::Tc const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::MessageHeader::Tc const &', 'arg0')])
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Tc::Deserialize(ns3::Buffer::Iterator start, uint32_t messageSize) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'messageSize')])
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::MessageHeader::Tc::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Tc::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::MessageHeader::Tc::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc::ansn [variable]
cls.add_instance_attribute('ansn', 'uint16_t', is_const=False)
## olsr-header.h (module 'olsr'): ns3::olsr::MessageHeader::Tc::neighborAddresses [variable]
cls.add_instance_attribute('neighborAddresses', 'std::vector< ns3::Ipv4Address >', is_const=False)
return
def register_Ns3OlsrMprSelectorTuple_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
## olsr-repositories.h (module 'olsr'): ns3::olsr::MprSelectorTuple::MprSelectorTuple() [constructor]
cls.add_constructor([])
## olsr-repositories.h (module 'olsr'): ns3::olsr::MprSelectorTuple::MprSelectorTuple(ns3::olsr::MprSelectorTuple const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::MprSelectorTuple const &', 'arg0')])
## olsr-repositories.h (module 'olsr'): ns3::olsr::MprSelectorTuple::expirationTime [variable]
cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::MprSelectorTuple::mainAddr [variable]
cls.add_instance_attribute('mainAddr', 'ns3::Ipv4Address', is_const=False)
return
def register_Ns3OlsrNeighborTuple_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_output_stream_operator()
## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::NeighborTuple() [constructor]
cls.add_constructor([])
## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::NeighborTuple(ns3::olsr::NeighborTuple const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::NeighborTuple const &', 'arg0')])
## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::neighborMainAddr [variable]
cls.add_instance_attribute('neighborMainAddr', 'ns3::Ipv4Address', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::status [variable]
cls.add_instance_attribute('status', 'ns3::olsr::NeighborTuple::Status', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::NeighborTuple::willingness [variable]
cls.add_instance_attribute('willingness', 'uint8_t', is_const=False)
return
def register_Ns3OlsrOlsrState_methods(root_module, cls):
## olsr-state.h (module 'olsr'): ns3::olsr::OlsrState::OlsrState(ns3::olsr::OlsrState const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::OlsrState const &', 'arg0')])
## olsr-state.h (module 'olsr'): ns3::olsr::OlsrState::OlsrState() [constructor]
cls.add_constructor([])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseAssociation(ns3::olsr::Association const & tuple) [member function]
cls.add_method('EraseAssociation',
'void',
[param('ns3::olsr::Association const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseAssociationTuple(ns3::olsr::AssociationTuple const & tuple) [member function]
cls.add_method('EraseAssociationTuple',
'void',
[param('ns3::olsr::AssociationTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseDuplicateTuple(ns3::olsr::DuplicateTuple const & tuple) [member function]
cls.add_method('EraseDuplicateTuple',
'void',
[param('ns3::olsr::DuplicateTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseIfaceAssocTuple(ns3::olsr::IfaceAssocTuple const & tuple) [member function]
cls.add_method('EraseIfaceAssocTuple',
'void',
[param('ns3::olsr::IfaceAssocTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseLinkTuple(ns3::olsr::LinkTuple const & tuple) [member function]
cls.add_method('EraseLinkTuple',
'void',
[param('ns3::olsr::LinkTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseMprSelectorTuple(ns3::olsr::MprSelectorTuple const & tuple) [member function]
cls.add_method('EraseMprSelectorTuple',
'void',
[param('ns3::olsr::MprSelectorTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseMprSelectorTuples(ns3::Ipv4Address const & mainAddr) [member function]
cls.add_method('EraseMprSelectorTuples',
'void',
[param('ns3::Ipv4Address const &', 'mainAddr')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseNeighborTuple(ns3::olsr::NeighborTuple const & neighborTuple) [member function]
cls.add_method('EraseNeighborTuple',
'void',
[param('ns3::olsr::NeighborTuple const &', 'neighborTuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseNeighborTuple(ns3::Ipv4Address const & mainAddr) [member function]
cls.add_method('EraseNeighborTuple',
'void',
[param('ns3::Ipv4Address const &', 'mainAddr')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseOlderTopologyTuples(ns3::Ipv4Address const & lastAddr, uint16_t ansn) [member function]
cls.add_method('EraseOlderTopologyTuples',
'void',
[param('ns3::Ipv4Address const &', 'lastAddr'), param('uint16_t', 'ansn')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseTopologyTuple(ns3::olsr::TopologyTuple const & tuple) [member function]
cls.add_method('EraseTopologyTuple',
'void',
[param('ns3::olsr::TopologyTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseTwoHopNeighborTuple(ns3::olsr::TwoHopNeighborTuple const & tuple) [member function]
cls.add_method('EraseTwoHopNeighborTuple',
'void',
[param('ns3::olsr::TwoHopNeighborTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseTwoHopNeighborTuples(ns3::Ipv4Address const & neighbor) [member function]
cls.add_method('EraseTwoHopNeighborTuples',
'void',
[param('ns3::Ipv4Address const &', 'neighbor')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::EraseTwoHopNeighborTuples(ns3::Ipv4Address const & neighbor, ns3::Ipv4Address const & twoHopNeighbor) [member function]
cls.add_method('EraseTwoHopNeighborTuples',
'void',
[param('ns3::Ipv4Address const &', 'neighbor'), param('ns3::Ipv4Address const &', 'twoHopNeighbor')])
## olsr-state.h (module 'olsr'): ns3::olsr::AssociationTuple * ns3::olsr::OlsrState::FindAssociationTuple(ns3::Ipv4Address const & gatewayAddr, ns3::Ipv4Address const & networkAddr, ns3::Ipv4Mask const & netmask) [member function]
cls.add_method('FindAssociationTuple',
'ns3::olsr::AssociationTuple *',
[param('ns3::Ipv4Address const &', 'gatewayAddr'), param('ns3::Ipv4Address const &', 'networkAddr'), param('ns3::Ipv4Mask const &', 'netmask')])
## olsr-state.h (module 'olsr'): ns3::olsr::DuplicateTuple * ns3::olsr::OlsrState::FindDuplicateTuple(ns3::Ipv4Address const & address, uint16_t sequenceNumber) [member function]
cls.add_method('FindDuplicateTuple',
'ns3::olsr::DuplicateTuple *',
[param('ns3::Ipv4Address const &', 'address'), param('uint16_t', 'sequenceNumber')])
## olsr-state.h (module 'olsr'): ns3::olsr::IfaceAssocTuple * ns3::olsr::OlsrState::FindIfaceAssocTuple(ns3::Ipv4Address const & ifaceAddr) [member function]
cls.add_method('FindIfaceAssocTuple',
'ns3::olsr::IfaceAssocTuple *',
[param('ns3::Ipv4Address const &', 'ifaceAddr')])
## olsr-state.h (module 'olsr'): ns3::olsr::IfaceAssocTuple const * ns3::olsr::OlsrState::FindIfaceAssocTuple(ns3::Ipv4Address const & ifaceAddr) const [member function]
cls.add_method('FindIfaceAssocTuple',
'ns3::olsr::IfaceAssocTuple const *',
[param('ns3::Ipv4Address const &', 'ifaceAddr')],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::LinkTuple * ns3::olsr::OlsrState::FindLinkTuple(ns3::Ipv4Address const & ifaceAddr) [member function]
cls.add_method('FindLinkTuple',
'ns3::olsr::LinkTuple *',
[param('ns3::Ipv4Address const &', 'ifaceAddr')])
## olsr-state.h (module 'olsr'): bool ns3::olsr::OlsrState::FindMprAddress(ns3::Ipv4Address const & address) [member function]
cls.add_method('FindMprAddress',
'bool',
[param('ns3::Ipv4Address const &', 'address')])
## olsr-state.h (module 'olsr'): ns3::olsr::MprSelectorTuple * ns3::olsr::OlsrState::FindMprSelectorTuple(ns3::Ipv4Address const & mainAddr) [member function]
cls.add_method('FindMprSelectorTuple',
'ns3::olsr::MprSelectorTuple *',
[param('ns3::Ipv4Address const &', 'mainAddr')])
## olsr-state.h (module 'olsr'): std::vector<ns3::Ipv4Address, std::allocator<ns3::Ipv4Address> > ns3::olsr::OlsrState::FindNeighborInterfaces(ns3::Ipv4Address const & neighborMainAddr) const [member function]
cls.add_method('FindNeighborInterfaces',
'std::vector< ns3::Ipv4Address >',
[param('ns3::Ipv4Address const &', 'neighborMainAddr')],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::NeighborTuple * ns3::olsr::OlsrState::FindNeighborTuple(ns3::Ipv4Address const & mainAddr) [member function]
cls.add_method('FindNeighborTuple',
'ns3::olsr::NeighborTuple *',
[param('ns3::Ipv4Address const &', 'mainAddr')])
## olsr-state.h (module 'olsr'): ns3::olsr::NeighborTuple * ns3::olsr::OlsrState::FindNeighborTuple(ns3::Ipv4Address const & mainAddr, uint8_t willingness) [member function]
cls.add_method('FindNeighborTuple',
'ns3::olsr::NeighborTuple *',
[param('ns3::Ipv4Address const &', 'mainAddr'), param('uint8_t', 'willingness')])
## olsr-state.h (module 'olsr'): ns3::olsr::TopologyTuple * ns3::olsr::OlsrState::FindNewerTopologyTuple(ns3::Ipv4Address const & lastAddr, uint16_t ansn) [member function]
cls.add_method('FindNewerTopologyTuple',
'ns3::olsr::TopologyTuple *',
[param('ns3::Ipv4Address const &', 'lastAddr'), param('uint16_t', 'ansn')])
## olsr-state.h (module 'olsr'): ns3::olsr::LinkTuple * ns3::olsr::OlsrState::FindSymLinkTuple(ns3::Ipv4Address const & ifaceAddr, ns3::Time time) [member function]
cls.add_method('FindSymLinkTuple',
'ns3::olsr::LinkTuple *',
[param('ns3::Ipv4Address const &', 'ifaceAddr'), param('ns3::Time', 'time')])
## olsr-state.h (module 'olsr'): ns3::olsr::NeighborTuple const * ns3::olsr::OlsrState::FindSymNeighborTuple(ns3::Ipv4Address const & mainAddr) const [member function]
cls.add_method('FindSymNeighborTuple',
'ns3::olsr::NeighborTuple const *',
[param('ns3::Ipv4Address const &', 'mainAddr')],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::TopologyTuple * ns3::olsr::OlsrState::FindTopologyTuple(ns3::Ipv4Address const & destAddr, ns3::Ipv4Address const & lastAddr) [member function]
cls.add_method('FindTopologyTuple',
'ns3::olsr::TopologyTuple *',
[param('ns3::Ipv4Address const &', 'destAddr'), param('ns3::Ipv4Address const &', 'lastAddr')])
## olsr-state.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple * ns3::olsr::OlsrState::FindTwoHopNeighborTuple(ns3::Ipv4Address const & neighbor, ns3::Ipv4Address const & twoHopNeighbor) [member function]
cls.add_method('FindTwoHopNeighborTuple',
'ns3::olsr::TwoHopNeighborTuple *',
[param('ns3::Ipv4Address const &', 'neighbor'), param('ns3::Ipv4Address const &', 'twoHopNeighbor')])
## olsr-state.h (module 'olsr'): ns3::olsr::AssociationSet const & ns3::olsr::OlsrState::GetAssociationSet() const [member function]
cls.add_method('GetAssociationSet',
'ns3::olsr::AssociationSet const &',
[],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::Associations const & ns3::olsr::OlsrState::GetAssociations() const [member function]
cls.add_method('GetAssociations',
'ns3::olsr::Associations const &',
[],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::IfaceAssocSet const & ns3::olsr::OlsrState::GetIfaceAssocSet() const [member function]
cls.add_method('GetIfaceAssocSet',
'ns3::olsr::IfaceAssocSet const &',
[],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::IfaceAssocSet & ns3::olsr::OlsrState::GetIfaceAssocSetMutable() [member function]
cls.add_method('GetIfaceAssocSetMutable',
'ns3::olsr::IfaceAssocSet &',
[])
## olsr-state.h (module 'olsr'): ns3::olsr::LinkSet const & ns3::olsr::OlsrState::GetLinks() const [member function]
cls.add_method('GetLinks',
'ns3::olsr::LinkSet const &',
[],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::MprSelectorSet const & ns3::olsr::OlsrState::GetMprSelectors() const [member function]
cls.add_method('GetMprSelectors',
'ns3::olsr::MprSelectorSet const &',
[],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::MprSet ns3::olsr::OlsrState::GetMprSet() const [member function]
cls.add_method('GetMprSet',
'ns3::olsr::MprSet',
[],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::NeighborSet const & ns3::olsr::OlsrState::GetNeighbors() const [member function]
cls.add_method('GetNeighbors',
'ns3::olsr::NeighborSet const &',
[],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::NeighborSet & ns3::olsr::OlsrState::GetNeighbors() [member function]
cls.add_method('GetNeighbors',
'ns3::olsr::NeighborSet &',
[])
## olsr-state.h (module 'olsr'): ns3::olsr::TopologySet const & ns3::olsr::OlsrState::GetTopologySet() const [member function]
cls.add_method('GetTopologySet',
'ns3::olsr::TopologySet const &',
[],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::TwoHopNeighborSet const & ns3::olsr::OlsrState::GetTwoHopNeighbors() const [member function]
cls.add_method('GetTwoHopNeighbors',
'ns3::olsr::TwoHopNeighborSet const &',
[],
is_const=True)
## olsr-state.h (module 'olsr'): ns3::olsr::TwoHopNeighborSet & ns3::olsr::OlsrState::GetTwoHopNeighbors() [member function]
cls.add_method('GetTwoHopNeighbors',
'ns3::olsr::TwoHopNeighborSet &',
[])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertAssociation(ns3::olsr::Association const & tuple) [member function]
cls.add_method('InsertAssociation',
'void',
[param('ns3::olsr::Association const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertAssociationTuple(ns3::olsr::AssociationTuple const & tuple) [member function]
cls.add_method('InsertAssociationTuple',
'void',
[param('ns3::olsr::AssociationTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertDuplicateTuple(ns3::olsr::DuplicateTuple const & tuple) [member function]
cls.add_method('InsertDuplicateTuple',
'void',
[param('ns3::olsr::DuplicateTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertIfaceAssocTuple(ns3::olsr::IfaceAssocTuple const & tuple) [member function]
cls.add_method('InsertIfaceAssocTuple',
'void',
[param('ns3::olsr::IfaceAssocTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): ns3::olsr::LinkTuple & ns3::olsr::OlsrState::InsertLinkTuple(ns3::olsr::LinkTuple const & tuple) [member function]
cls.add_method('InsertLinkTuple',
'ns3::olsr::LinkTuple &',
[param('ns3::olsr::LinkTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertMprSelectorTuple(ns3::olsr::MprSelectorTuple const & tuple) [member function]
cls.add_method('InsertMprSelectorTuple',
'void',
[param('ns3::olsr::MprSelectorTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertNeighborTuple(ns3::olsr::NeighborTuple const & tuple) [member function]
cls.add_method('InsertNeighborTuple',
'void',
[param('ns3::olsr::NeighborTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertTopologyTuple(ns3::olsr::TopologyTuple const & tuple) [member function]
cls.add_method('InsertTopologyTuple',
'void',
[param('ns3::olsr::TopologyTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::InsertTwoHopNeighborTuple(ns3::olsr::TwoHopNeighborTuple const & tuple) [member function]
cls.add_method('InsertTwoHopNeighborTuple',
'void',
[param('ns3::olsr::TwoHopNeighborTuple const &', 'tuple')])
## olsr-state.h (module 'olsr'): std::string ns3::olsr::OlsrState::PrintMprSelectorSet() const [member function]
cls.add_method('PrintMprSelectorSet',
'std::string',
[],
is_const=True)
## olsr-state.h (module 'olsr'): void ns3::olsr::OlsrState::SetMprSet(ns3::olsr::MprSet mprSet) [member function]
cls.add_method('SetMprSet',
'void',
[param('std::set< ns3::Ipv4Address >', 'mprSet')])
return
def register_Ns3OlsrPacketHeader_methods(root_module, cls):
cls.add_output_stream_operator()
## olsr-header.h (module 'olsr'): ns3::olsr::PacketHeader::PacketHeader(ns3::olsr::PacketHeader const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::PacketHeader const &', 'arg0')])
## olsr-header.h (module 'olsr'): ns3::olsr::PacketHeader::PacketHeader() [constructor]
cls.add_constructor([])
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::PacketHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
cls.add_method('Deserialize',
'uint32_t',
[param('ns3::Buffer::Iterator', 'start')],
is_virtual=True)
## olsr-header.h (module 'olsr'): ns3::TypeId ns3::olsr::PacketHeader::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## olsr-header.h (module 'olsr'): uint16_t ns3::olsr::PacketHeader::GetPacketLength() const [member function]
cls.add_method('GetPacketLength',
'uint16_t',
[],
is_const=True)
## olsr-header.h (module 'olsr'): uint16_t ns3::olsr::PacketHeader::GetPacketSequenceNumber() const [member function]
cls.add_method('GetPacketSequenceNumber',
'uint16_t',
[],
is_const=True)
## olsr-header.h (module 'olsr'): uint32_t ns3::olsr::PacketHeader::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True, is_virtual=True)
## olsr-header.h (module 'olsr'): static ns3::TypeId ns3::olsr::PacketHeader::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::PacketHeader::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True, is_virtual=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::PacketHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::Buffer::Iterator', 'start')],
is_const=True, is_virtual=True)
## olsr-header.h (module 'olsr'): void ns3::olsr::PacketHeader::SetPacketLength(uint16_t length) [member function]
cls.add_method('SetPacketLength',
'void',
[param('uint16_t', 'length')])
## olsr-header.h (module 'olsr'): void ns3::olsr::PacketHeader::SetPacketSequenceNumber(uint16_t seqnum) [member function]
cls.add_method('SetPacketSequenceNumber',
'void',
[param('uint16_t', 'seqnum')])
return
def register_Ns3OlsrRoutingProtocol_methods(root_module, cls):
## olsr-routing-protocol.h (module 'olsr'): static ns3::TypeId ns3::olsr::RoutingProtocol::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingProtocol::RoutingProtocol() [constructor]
cls.add_constructor([])
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::SetMainInterface(uint32_t interface) [member function]
cls.add_method('SetMainInterface',
'void',
[param('uint32_t', 'interface')])
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::Dump() [member function]
cls.add_method('Dump',
'void',
[])
## olsr-routing-protocol.h (module 'olsr'): std::vector<ns3::olsr::RoutingTableEntry, std::allocator<ns3::olsr::RoutingTableEntry> > ns3::olsr::RoutingProtocol::GetRoutingTableEntries() const [member function]
cls.add_method('GetRoutingTableEntries',
'std::vector< ns3::olsr::RoutingTableEntry >',
[],
is_const=True)
## olsr-routing-protocol.h (module 'olsr'): int64_t ns3::olsr::RoutingProtocol::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')])
## olsr-routing-protocol.h (module 'olsr'): std::set<unsigned int, std::less<unsigned int>, std::allocator<unsigned int> > ns3::olsr::RoutingProtocol::GetInterfaceExclusions() const [member function]
cls.add_method('GetInterfaceExclusions',
'std::set< unsigned int >',
[],
is_const=True)
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::SetInterfaceExclusions(std::set<unsigned int, std::less<unsigned int>, std::allocator<unsigned int> > exceptions) [member function]
cls.add_method('SetInterfaceExclusions',
'void',
[param('std::set< unsigned int >', 'exceptions')])
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::AddHostNetworkAssociation(ns3::Ipv4Address networkAddr, ns3::Ipv4Mask netmask) [member function]
cls.add_method('AddHostNetworkAssociation',
'void',
[param('ns3::Ipv4Address', 'networkAddr'), param('ns3::Ipv4Mask', 'netmask')])
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::RemoveHostNetworkAssociation(ns3::Ipv4Address networkAddr, ns3::Ipv4Mask netmask) [member function]
cls.add_method('RemoveHostNetworkAssociation',
'void',
[param('ns3::Ipv4Address', 'networkAddr'), param('ns3::Ipv4Mask', 'netmask')])
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::SetRoutingTableAssociation(ns3::Ptr<ns3::Ipv4StaticRouting> routingTable) [member function]
cls.add_method('SetRoutingTableAssociation',
'void',
[param('ns3::Ptr< ns3::Ipv4StaticRouting >', 'routingTable')])
## olsr-routing-protocol.h (module 'olsr'): ns3::Ptr<const ns3::Ipv4StaticRouting> ns3::olsr::RoutingProtocol::GetRoutingTableAssociation() const [member function]
cls.add_method('GetRoutingTableAssociation',
'ns3::Ptr< ns3::Ipv4StaticRouting const >',
[],
is_const=True)
## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingProtocol::RoutingProtocol(ns3::olsr::RoutingProtocol const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::RoutingProtocol const &', 'arg0')])
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## olsr-routing-protocol.h (module 'olsr'): ns3::Ptr<ns3::Ipv4Route> ns3::olsr::RoutingProtocol::RouteOutput(ns3::Ptr<ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<ns3::NetDevice> oif, ns3::Socket::SocketErrno & sockerr) [member function]
cls.add_method('RouteOutput',
'ns3::Ptr< ns3::Ipv4Route >',
[param('ns3::Ptr< ns3::Packet >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice >', 'oif'), param('ns3::Socket::SocketErrno &', 'sockerr')],
visibility='private', is_virtual=True)
## olsr-routing-protocol.h (module 'olsr'): bool ns3::olsr::RoutingProtocol::RouteInput(ns3::Ptr<const ns3::Packet> p, ns3::Ipv4Header const & header, ns3::Ptr<const ns3::NetDevice> idev, ns3::Ipv4RoutingProtocol::UnicastForwardCallback ucb, ns3::Ipv4RoutingProtocol::MulticastForwardCallback mcb, ns3::Ipv4RoutingProtocol::LocalDeliverCallback lcb, ns3::Ipv4RoutingProtocol::ErrorCallback ecb) [member function]
cls.add_method('RouteInput',
'bool',
[param('ns3::Ptr< ns3::Packet const >', 'p'), param('ns3::Ipv4Header const &', 'header'), param('ns3::Ptr< ns3::NetDevice const >', 'idev'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4Route >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ucb'), param('ns3::Callback< void, ns3::Ptr< ns3::Ipv4MulticastRoute >, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'mcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'lcb'), param('ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::Ipv4Header const &, ns3::Socket::SocketErrno, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'ecb')],
visibility='private', is_virtual=True)
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::NotifyInterfaceUp(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceUp',
'void',
[param('uint32_t', 'interface')],
visibility='private', is_virtual=True)
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::NotifyInterfaceDown(uint32_t interface) [member function]
cls.add_method('NotifyInterfaceDown',
'void',
[param('uint32_t', 'interface')],
visibility='private', is_virtual=True)
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::NotifyAddAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyAddAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
visibility='private', is_virtual=True)
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::NotifyRemoveAddress(uint32_t interface, ns3::Ipv4InterfaceAddress address) [member function]
cls.add_method('NotifyRemoveAddress',
'void',
[param('uint32_t', 'interface'), param('ns3::Ipv4InterfaceAddress', 'address')],
visibility='private', is_virtual=True)
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::SetIpv4(ns3::Ptr<ns3::Ipv4> ipv4) [member function]
cls.add_method('SetIpv4',
'void',
[param('ns3::Ptr< ns3::Ipv4 >', 'ipv4')],
visibility='private', is_virtual=True)
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::PrintRoutingTable(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Time::Unit unit=::ns3::Time::Unit::S) const [member function]
cls.add_method('PrintRoutingTable',
'void',
[param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Time::Unit', 'unit', default_value='::ns3::Time::Unit::S')],
is_const=True, visibility='private', is_virtual=True)
## olsr-routing-protocol.h (module 'olsr'): void ns3::olsr::RoutingProtocol::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3OlsrRoutingTableEntry_methods(root_module, cls):
## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::RoutingTableEntry(ns3::olsr::RoutingTableEntry const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::RoutingTableEntry const &', 'arg0')])
## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::RoutingTableEntry() [constructor]
cls.add_constructor([])
## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::destAddr [variable]
cls.add_instance_attribute('destAddr', 'ns3::Ipv4Address', is_const=False)
## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::distance [variable]
cls.add_instance_attribute('distance', 'uint32_t', is_const=False)
## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::interface [variable]
cls.add_instance_attribute('interface', 'uint32_t', is_const=False)
## olsr-routing-protocol.h (module 'olsr'): ns3::olsr::RoutingTableEntry::nextAddr [variable]
cls.add_instance_attribute('nextAddr', 'ns3::Ipv4Address', is_const=False)
return
def register_Ns3OlsrTopologyTuple_methods(root_module, cls):
cls.add_binary_comparison_operator('==')
cls.add_output_stream_operator()
## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::TopologyTuple() [constructor]
cls.add_constructor([])
## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::TopologyTuple(ns3::olsr::TopologyTuple const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::TopologyTuple const &', 'arg0')])
## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::destAddr [variable]
cls.add_instance_attribute('destAddr', 'ns3::Ipv4Address', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::expirationTime [variable]
cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::lastAddr [variable]
cls.add_instance_attribute('lastAddr', 'ns3::Ipv4Address', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::TopologyTuple::sequenceNumber [variable]
cls.add_instance_attribute('sequenceNumber', 'uint16_t', is_const=False)
return
def register_Ns3OlsrTwoHopNeighborTuple_methods(root_module, cls):
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple::TwoHopNeighborTuple() [constructor]
cls.add_constructor([])
## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple::TwoHopNeighborTuple(ns3::olsr::TwoHopNeighborTuple const & arg0) [constructor]
cls.add_constructor([param('ns3::olsr::TwoHopNeighborTuple const &', 'arg0')])
## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple::expirationTime [variable]
cls.add_instance_attribute('expirationTime', 'ns3::Time', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple::neighborMainAddr [variable]
cls.add_instance_attribute('neighborMainAddr', 'ns3::Ipv4Address', is_const=False)
## olsr-repositories.h (module 'olsr'): ns3::olsr::TwoHopNeighborTuple::twoHopNeighborAddr [variable]
cls.add_instance_attribute('twoHopNeighborAddr', 'ns3::Ipv4Address', is_const=False)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
register_functions_ns3_TracedValueCallback(module.add_cpp_namespace('TracedValueCallback'), root_module)
register_functions_ns3_olsr(module.add_cpp_namespace('olsr'), root_module)
register_functions_ns3_tests(module.add_cpp_namespace('tests'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.add_cpp_namespace('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def register_functions_ns3_olsr(module, root_module):
## olsr-header.h (module 'olsr'): double ns3::olsr::EmfToSeconds(uint8_t emf) [free function]
module.add_function('EmfToSeconds',
'double',
[param('uint8_t', 'emf')])
## olsr-header.h (module 'olsr'): uint8_t ns3::olsr::SecondsToEmf(double seconds) [free function]
module.add_function('SecondsToEmf',
'uint8_t',
[param('double', 'seconds')])
return
def register_functions_ns3_tests(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
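# Illustrative note (not part of the generated bindings): main() above streams
# the C++ pybindgen glue for the 'olsr' module to stdout via FileCodeSink, so a
# manual run would look roughly like
#
#   python modulegen__gcc_LP64.py > olsr-module-bindings.cc
#
# The output filename and any extra arguments supplied by the ns-3 build system
# are assumptions for illustration only.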
|
tomhenderson/ns-3-dev-git
|
src/olsr/bindings/modulegen__gcc_LP64.py
|
Python
|
gpl-2.0
| 542,189
|
import uwsgi
from os import path
uwsgi.snmp_set_counter64(1, 0) # Number of requests
uwsgi.snmp_set_counter64(2, 0) # Number of bytes
def application(environ, start_response):
    size = path.getsize('logo_uWSGI.png')
    start_response('200 OK', [('Content-Type', 'image/png'), ('Content-Length', str(size))])
    fd = open('logo_uWSGI.png', 'rb')  # open in binary mode so the raw PNG bytes are streamed unchanged
    uwsgi.snmp_incr_counter64(1)        # one more request served
    uwsgi.snmp_incr_counter64(2, size)  # add the bytes sent for this response
    return environ['wsgi.file_wrapper'](fd, 4096)  # stream the file via the server's file_wrapper in 4096-byte chunks
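# Illustrative note (assumption, not part of the original example): the counters
# initialised above are only reachable once the embedded uWSGI SNMP server is
# enabled at startup, e.g. something along the lines of
#
#   uwsgi --wsgi-file staticfilesnmp.py --http :9090 --snmp 127.0.0.1:2222 --snmp-community foo
#
# The exact flags, addresses and community string are placeholders; check the
# uWSGI SNMP documentation for the invocation that matches your setup.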
|
blablacio/uwsgi
|
examples/staticfilesnmp.py
|
Python
|
gpl-2.0
| 470
|
# Copyright 2009 Google Inc. Released under the GPL v2
"""This is a convenience module to import all available types of hosts.
Implementation details:
You should 'import hosts' instead of importing every available host module.
"""
from autotest_lib.client.common_lib import utils
import base_classes
Host = utils.import_site_class(
__file__, "autotest_lib.client.common_lib.hosts.site_host", "SiteHost",
base_classes.Host)
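# Illustrative usage sketch (assumption, not part of the original module), kept
# commented out so import-time behaviour is unchanged: callers import this
# package and use hosts.Host, which resolves to the site-specific SiteHost when
# one is installed and falls back to base_classes.Host otherwise.
#
#   from autotest_lib.client.common_lib import hosts
#   host = hosts.Host()
#
# Constructor arguments (if any) depend on the concrete Host class in use.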
|
clebergnu/autotest
|
client/common_lib/hosts/__init__.py
|
Python
|
gpl-2.0
| 435
|
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, getdate, formatdate, cstr
from erpnext.accounts.report.financial_statements import filter_accounts, filter_out_zero_value_rows
from erpnext.accounts.report.trial_balance.trial_balance import validate_filters
value_fields = ("income", "expense", "gross_profit_loss")
def execute(filters=None):
if not filters.get('based_on'): filters["based_on"] = 'Cost Center'
based_on = filters.based_on.replace(' ', '_').lower()
validate_filters(filters)
accounts = get_accounts_data(based_on, filters.get("company"))
data = get_data(accounts, filters, based_on)
columns = get_columns(filters)
return columns, data
def get_accounts_data(based_on, company):
if based_on == 'cost_center':
return frappe.db.sql("""select name, parent_cost_center as parent_account, cost_center_name as account_name, lft, rgt
from `tabCost Center` where company=%s order by name""", company, as_dict=True)
else:
return frappe.get_all('Project', fields = ["name"], filters = {'company': company}, order_by = 'name')
def get_data(accounts, filters, based_on):
if not accounts:
return []
accounts, accounts_by_name, parent_children_map = filter_accounts(accounts)
gl_entries_by_account = {}
set_gl_entries_by_account(filters.get("company"), filters.get("from_date"),
filters.get("to_date"), based_on, gl_entries_by_account, ignore_closing_entries=not flt(filters.get("with_period_closing_entry")))
total_row = calculate_values(accounts, gl_entries_by_account, filters)
accumulate_values_into_parents(accounts, accounts_by_name)
data = prepare_data(accounts, filters, total_row, parent_children_map, based_on)
data = filter_out_zero_value_rows(data, parent_children_map,
show_zero_values=filters.get("show_zero_values"))
return data
def calculate_values(accounts, gl_entries_by_account, filters):
init = {
"income": 0.0,
"expense": 0.0,
"gross_profit_loss": 0.0
}
total_row = {
"cost_center": None,
"account_name": "'" + _("Total") + "'",
"warn_if_negative": True,
"income": 0.0,
"expense": 0.0,
"gross_profit_loss": 0.0,
"account": "'" + _("Total") + "'",
"parent_account": None,
"indent": 0,
"has_value": True
}
for d in accounts:
d.update(init.copy())
        # accumulate income and expense, skipping opening entries
for entry in gl_entries_by_account.get(d.name, []):
if cstr(entry.is_opening) != "Yes":
if entry.type == 'Income':
d["income"] += flt(entry.credit) - flt(entry.debit)
if entry.type == 'Expense':
d["expense"] += flt(entry.debit) - flt(entry.credit)
d["gross_profit_loss"] = d.get("income") - d.get("expense")
total_row["income"] += d["income"]
total_row["expense"] += d["expense"]
total_row["gross_profit_loss"] = total_row.get("income") - total_row.get("expense")
return total_row
def accumulate_values_into_parents(accounts, accounts_by_name):
for d in reversed(accounts):
if d.parent_account:
for key in value_fields:
accounts_by_name[d.parent_account][key] += d[key]
def prepare_data(accounts, filters, total_row, parent_children_map, based_on):
data = []
company_currency = frappe.get_cached_value('Company', filters.get("company"), "default_currency")
for d in accounts:
has_value = False
row = {
"account_name": d.account_name or d.name,
"account": d.name,
"parent_account": d.parent_account,
"indent": d.indent,
"fiscal_year": filters.get("fiscal_year"),
"currency": company_currency,
"based_on": based_on
}
for key in value_fields:
row[key] = flt(d.get(key, 0.0), 3)
if abs(row[key]) >= 0.005:
                # treat values at or above 0.005 as non-zero so the row is kept
has_value = True
row["has_value"] = has_value
data.append(row)
data.extend([{},total_row])
return data
def get_columns(filters):
return [
{
"fieldname": "account",
"label": _(filters.get("based_on")),
"fieldtype": "Link",
"options": filters.get("based_on"),
"width": 300
},
{
"fieldname": "income",
"label": _("Income"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "expense",
"label": _("Expense"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "gross_profit_loss",
"label": _("Gross Profit / Loss"),
"fieldtype": "Currency",
"options": "currency",
"width": 120
},
{
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"hidden": 1
}
]
def set_gl_entries_by_account(company, from_date, to_date, based_on, gl_entries_by_account,
ignore_closing_entries=False):
"""Returns a dict like { "account": [gl entries], ... }"""
additional_conditions = []
if ignore_closing_entries:
additional_conditions.append("and ifnull(voucher_type, '')!='Period Closing Voucher'")
if from_date:
additional_conditions.append("and posting_date >= %(from_date)s")
gl_entries = frappe.db.sql("""select posting_date, {based_on} as based_on, debit, credit,
is_opening, (select root_type from `tabAccount` where name = account) as type
from `tabGL Entry` where company=%(company)s
{additional_conditions}
and posting_date <= %(to_date)s
and {based_on} is not null
order by {based_on}, posting_date""".format(additional_conditions="\n".join(additional_conditions), based_on= based_on),
{
"company": company,
"from_date": from_date,
"to_date": to_date
},
as_dict=True)
for entry in gl_entries:
gl_entries_by_account.setdefault(entry.based_on, []).append(entry)
return gl_entries_by_account
|
chdecultot/erpnext
|
erpnext/accounts/report/profitability_analysis/profitability_analysis.py
|
Python
|
gpl-3.0
| 5,688
|
#!/usr/bin/env python
'''
Loader for all 2009 Dorado missions written for Monique's notice of bad
depths in Dorado389_2009_084_02_084_02_decim.nc.
Mike McCann
MBARI 15 January 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir) # So that CANON is found
from CANON import CANONLoader
import timing
cl = CANONLoader('stoqs_dorado2009', 'Dorado - All 2009 missions',
description = 'In Monterey Bay and Santa Monica Basin - includes processed Gulper Samples',
x3dTerrains = {
'https://stoqs.mbari.org/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
'position': '-2822317.31255 -4438600.53640 3786150.85474',
'orientation': '0.89575 -0.31076 -0.31791 1.63772',
'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
'VerticalExaggeration': '10',
'speed': '.1',
}
},
grdTerrain = os.path.join(parentDir, 'Monterey25.grd')
)
# Dorado surveys in 2009
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2009/netcdf/'
cl.dorado_files = [
'Dorado389_2009_055_05_055_05_decim.nc',
'Dorado389_2009_084_00_084_00_decim.nc',
'Dorado389_2009_084_02_084_02_decim.nc',
'Dorado389_2009_085_02_085_02_decim.nc',
'Dorado389_2009_111_00_111_00_decim.nc',
'Dorado389_2009_111_01_111_01_decim.nc',
'Dorado389_2009_112_07_112_07_decim.nc',
'Dorado389_2009_113_00_113_00_decim.nc',
'Dorado389_2009_124_03_124_03_decim.nc',
'Dorado389_2009_125_00_125_00_decim.nc',
'Dorado389_2009_126_00_126_00_decim.nc',
'Dorado389_2009_152_00_152_00_decim.nc',
'Dorado389_2009_153_01_153_01_decim.nc',
'Dorado389_2009_154_00_154_00_decim.nc',
'Dorado389_2009_155_03_155_03_decim.nc',
'Dorado389_2009_182_01_182_01_decim.nc',
'Dorado389_2009_272_00_272_00_decim.nc',
'Dorado389_2009_274_03_274_03_decim.nc',
'Dorado389_2009_278_01_278_01_decim.nc',
'Dorado389_2009_278_01_278_02_decim.nc',
'Dorado389_2009_279_00_279_00_decim.nc',
'Dorado389_2009_280_00_280_00_decim.nc',
'Dorado389_2009_281_01_281_01_decim.nc',
'Dorado389_2009_308_04_308_04_decim.nc',
'Dorado389_2009_309_00_309_03_decim.nc',
'Dorado389_2009_313_02_313_02_decim.nc',
'Dorado389_2009_342_04_342_04_decim.nc',
'Dorado389_2009_348_05_348_05_decim.nc',
]
cl.dorado_parms = [ 'temperature', 'oxygen', 'nitrate', 'bbp420', 'bbp700',
'fl700_uncorr', 'salinity', 'biolume',
'sepCountList', 'mepCountList',
'roll', 'pitch', 'yaw']
# Mooring M1ts
cl.m1ts_base = 'http://elvis.shore.mbari.org/thredds/dodsC/agg/'
cl.m1ts_files = ['OS_MBARI-M1_R_TS']
cl.m1ts_parms = [ 'PSAL', 'TEMP' ]
cl.m1ts_startDatetime = datetime.datetime(2009, 1, 1)
cl.m1ts_endDatetime = datetime.datetime(2009, 12, 31)
# Mooring M1met
cl.m1met_base = 'http://elvis.shore.mbari.org/thredds/dodsC/agg/'
cl.m1met_files = ['OS_MBARI-M1_R_M']
cl.m1met_parms = [ 'WSPD', 'WDIR', 'ATMP', 'SW', 'RELH' ]
cl.m1met_startDatetime = datetime.datetime(2009, 1, 1)
cl.m1met_endDatetime = datetime.datetime(2009, 12, 31)
# Execute the load
cl.process_command_line()
if cl.args.test:
cl.loadDorado(stride=20)
##cl.loadM1ts(stride=10)
##cl.loadM1met(stride=10)
elif cl.args.optimal_stride:
cl.loadDorado(stride=2)
cl.loadM1ts(stride=1)
cl.loadM1met(stride=1)
else:
cl.loadDorado(stride=cl.args.stride)
##cl.loadM1ts(stride=cl.args.stride)
##cl.loadM1met(stride=cl.args.stride)
# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()
print("All Done.")
|
duane-edgington/stoqs
|
stoqs/loaders/MolecularEcology/load_dorado2009.py
|
Python
|
gpl-3.0
| 4,550
|
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# Copyright (C) 2011 credativ Ltd (<http://www.credativ.co.uk>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date
from openerp.osv import orm, fields
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as OE_DATEFORMAT
class hsbc_export(orm.Model):
"""HSBC Export"""
_name = 'banking.export.hsbc'
_description = __doc__
_rec_name = 'execution_date'
_columns = {
'payment_order_ids': fields.many2many(
'payment.order',
'account_payment_order_hsbc_rel',
'banking_export_hsbc_id', 'account_order_id',
'Payment Orders',
readonly=True),
'identification':
fields.char('Identification', size=15, readonly=True, select=True),
'execution_date':
fields.date('Execution Date', readonly=True),
'no_transactions':
fields.integer('Number of Transactions', readonly=True),
'total_amount':
fields.float('Total Amount', readonly=True),
'date_generated':
fields.datetime('Generation Date', readonly=True, select=True),
'file':
fields.binary('HSBC File', readonly=True),
'state':
fields.selection([
('draft', 'Draft'),
('sent', 'Sent'),
('done', 'Reconciled'),
], 'State', readonly=True),
}
_defaults = {
'date_generated': lambda *a: date.today().strftime(OE_DATEFORMAT),
'state': 'draft',
}
class payment_line(orm.Model):
"""The standard payment order is using a mixture of details from the
    partner record and the res.partner.bank record. For instance, the account
holder name is coming from the res.partner.bank record, but the company
name and address are coming from the partner address record. This is
problematic because the HSBC payment format is validating for alphanumeric
characters in the company name and address. So, "Great Company Ltd." and
"Great Company s.a." will cause an error because they have full-stops in
the name.
A better approach is to use the name and address details from the
res.partner.bank record always. This way, the address details can be
sanitized for the payments, whilst being able to print the proper name and
address throughout the rest of the system e.g. on invoices.
"""
_inherit = 'payment.line'
def info_owner(self, cr, uid, ids, name=None, args=None, context=None):
if not ids:
return {}
result = {}
info = ''
for line in self.browse(cr, uid, ids, context=context):
owner = line.order_id.mode.bank_id
name = owner.owner_name or owner.partner_id.name
st = owner.street and owner.street or ''
st1 = '' # no street2 in res.partner.bank
zip = owner.zip and owner.zip or ''
city = owner.city and owner.city or ''
zip_city = zip + ' ' + city
cntry = owner.country_id and owner.country_id.name or ''
info = name + "\n".join((st + " ", st1, zip_city, cntry))
result[line.id] = info
return result
def info_partner(self, cr, uid, ids, name=None, args=None, context=None):
if not ids:
return {}
result = {}
info = ''
for line in self.browse(cr, uid, ids, context=context):
partner = line.bank_id
name = partner.owner_name or partner.partner_id.name
st = partner.street and partner.street or ''
st1 = '' # no street2 in res.partner.bank
zip = partner.zip and partner.zip or ''
city = partner.city and partner.city or ''
zip_city = zip + ' ' + city
cntry = partner.country_id and partner.country_id.name or ''
info = name + "\n".join((st + " ", st1, zip_city, cntry))
result[line.id] = info
return result
# Define the info_partner and info_owner so we can override the methods
_columns = {
'info_owner': fields.function(
info_owner,
string="Owner Account",
type="text",
help='Address of the Main Partner',
),
'info_partner': fields.function(
info_partner,
string="Destination Account",
type="text",
help='Address of the Ordering Customer.'
),
}
|
rschnapka/bank-payment
|
account_banking_uk_hsbc/account_banking_uk_hsbc.py
|
Python
|
agpl-3.0
| 5,362
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'author': 'ADHOC SA',
'category': 'Accounting & Finance',
'demo_xml': [],
'depends': ['account'],
'description': '''
Account Invoice Prices Update
=============================
''',
'installable': True,
'name': 'Account Invoice Prices Update',
'test': [],
'data': [
'wizard/update_prices_wizard_view.xml',
'views/invoice_view.xml',
],
'version': '8.0.0.0.1',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sysadminmatmoz/ingadhoc
|
account_invoice_prices_update/__openerp__.py
|
Python
|
agpl-3.0
| 1,508
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import pytest
from llnl.util.filesystem import touch, working_dir
import spack.repo
import spack.config
from spack.spec import Spec
from spack.version import ver
from spack.util.executable import which
pytestmark = pytest.mark.skipif(
not which('svn'), reason='requires subversion to be installed')
@pytest.mark.parametrize("type_of_test", ['default', 'rev0'])
@pytest.mark.parametrize("secure", [True, False])
def test_fetch(
type_of_test,
secure,
mock_svn_repository,
config,
mutable_mock_packages
):
"""Tries to:
1. Fetch the repo using a fetch strategy constructed with
supplied args (they depend on type_of_test).
2. Check if the test_file is in the checked out repository.
3. Assert that the repository is at the revision supplied.
4. Add and remove some files, then reset the repo, and
ensure it's all there again.
"""
# Retrieve the right test parameters
t = mock_svn_repository.checks[type_of_test]
h = mock_svn_repository.hash
# Construct the package under test
spec = Spec('svn-test')
spec.concretize()
pkg = spack.repo.get(spec)
pkg.versions[ver('svn')] = t.args
# Enter the stage directory and check some properties
with pkg.stage:
with spack.config.override('config:verify_ssl', secure):
pkg.do_stage()
with working_dir(pkg.stage.source_path):
assert h() == t.revision
file_path = os.path.join(pkg.stage.source_path, t.file)
assert os.path.isdir(pkg.stage.source_path)
assert os.path.isfile(file_path)
os.unlink(file_path)
assert not os.path.isfile(file_path)
untracked_file = 'foobarbaz'
touch(untracked_file)
assert os.path.isfile(untracked_file)
pkg.do_restage()
assert not os.path.isfile(untracked_file)
assert os.path.isdir(pkg.stage.source_path)
assert os.path.isfile(file_path)
assert h() == t.revision
|
tmerrick1/spack
|
lib/spack/spack/test/svn_fetch.py
|
Python
|
lgpl-2.1
| 3,303
|
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/MSCommon/sdk.py rel_2.3.5:3347:d31d5a4e74b6 2015/07/31 14:36:10 bdbaddog"
__doc__ = """Module to detect the Platform/Windows SDK
PSDK 2003 R1 is the earliest version detected.
"""
import os
import SCons.Errors
import SCons.Util
import common
debug = common.debug
# SDK Checks. This is of course a mess, as is everything else on MS platforms. Here
# is what we do to detect the SDK:
#
# For Windows SDK >= 6.0: just look into the registry entries:
# HKLM\Software\Microsoft\Microsoft SDKs\Windows
# All the keys in there are the available versions.
#
# For Platform SDK before 6.0 (2003 server R1 and R2, etc...), there does not
# seem to be any sane registry key, so the precise location is hardcoded.
#
# For versions below 2003R1, it seems the PSDK is included with Visual Studio?
#
# Also, per the following:
# http://benjamin.smedbergs.us/blog/tag/atl/
# VC++ Professional comes with the SDK, VC++ Express does not.
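# As a concrete illustration (not exhaustive): detecting the 7.1 Windows SDK amounts to
# reading a registry value shaped like
#   Software\Microsoft\Microsoft SDKs\Windows\v7.1\InstallationFolder
# (see WindowsSDK.HKEY_FMT below) and then sanity-checking that bin\SetEnv.Cmd exists
# under the directory it points to.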
# Location of the SDK (checked for 6.1 only)
_CURINSTALLED_SDK_HKEY_ROOT = \
r"Software\Microsoft\Microsoft SDKs\Windows\CurrentInstallFolder"
class SDKDefinition(object):
"""
An abstract base class for trying to find installed SDK directories.
"""
def __init__(self, version, **kw):
self.version = version
self.__dict__.update(kw)
def find_sdk_dir(self):
"""Try to find the MS SDK from the registry.
Return None if failed or the directory does not exist.
"""
if not SCons.Util.can_read_reg:
debug('find_sdk_dir(): can not read registry')
return None
hkey = self.HKEY_FMT % self.hkey_data
debug('find_sdk_dir(): checking registry:%s'%hkey)
try:
sdk_dir = common.read_reg(hkey)
except WindowsError, e:
debug('find_sdk_dir(): no SDK registry key %s' % repr(hkey))
return None
debug('find_sdk_dir(): Trying SDK Dir: %s'%sdk_dir)
if not os.path.exists(sdk_dir):
debug('find_sdk_dir(): %s not on file system' % sdk_dir)
return None
ftc = os.path.join(sdk_dir, self.sanity_check_file)
if not os.path.exists(ftc):
debug("find_sdk_dir(): sanity check %s not found" % ftc)
return None
return sdk_dir
def get_sdk_dir(self):
"""Return the MSSSDK given the version string."""
try:
return self._sdk_dir
except AttributeError:
sdk_dir = self.find_sdk_dir()
self._sdk_dir = sdk_dir
return sdk_dir
def get_sdk_vc_script(self,host_arch, target_arch):
""" Return the script to initialize the VC compiler installed by SDK
"""
if (host_arch == 'amd64' and target_arch == 'x86'):
# No cross tools needed compiling 32 bits on 64 bit machine
host_arch=target_arch
arch_string=target_arch
if (host_arch != target_arch):
arch_string='%s_%s'%(host_arch,target_arch)
debug("sdk.py: get_sdk_vc_script():arch_string:%s host_arch:%s target_arch:%s"%(arch_string,
host_arch,
target_arch))
file=self.vc_setup_scripts.get(arch_string,None)
debug("sdk.py: get_sdk_vc_script():file:%s"%file)
return file
class WindowsSDK(SDKDefinition):
"""
A subclass for trying to find installed Windows SDK directories.
"""
HKEY_FMT = r'Software\Microsoft\Microsoft SDKs\Windows\v%s\InstallationFolder'
def __init__(self, *args, **kw):
SDKDefinition.__init__(self, *args, **kw)
self.hkey_data = self.version
class PlatformSDK(SDKDefinition):
"""
A subclass for trying to find installed Platform SDK directories.
"""
HKEY_FMT = r'Software\Microsoft\MicrosoftSDK\InstalledSDKS\%s\Install Dir'
def __init__(self, *args, **kw):
SDKDefinition.__init__(self, *args, **kw)
self.hkey_data = self.uuid
#
# The list of VC initialization scripts installed by the SDK
# These should be tried if the vcvarsall.bat TARGET_ARCH fails
preSDK61VCSetupScripts = { 'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\vcvarsamd64.bat',
'x86_amd64': r'bin\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\vcvarsx86_ia64.bat',
'ia64' : r'bin\vcvarsia64.bat'}
SDK61VCSetupScripts = {'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\amd64\vcvarsamd64.bat',
'x86_amd64': r'bin\x86_amd64\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\x86_ia64\vcvarsx86_ia64.bat',
'ia64' : r'bin\ia64\vcvarsia64.bat'}
SDK70VCSetupScripts = { 'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\vcvars64.bat',
'x86_amd64': r'bin\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\vcvarsx86_ia64.bat',
'ia64' : r'bin\vcvarsia64.bat'}
# The list of support SDKs which we know how to detect.
#
# The first SDK found in the list is the one used by default if there
# are multiple SDKs installed. Barring good reasons to the contrary,
# this means we should list SDKs from most recent to oldest.
#
# If you update this list, update the documentation in Tool/mssdk.xml.
SupportedSDKList = [
WindowsSDK('7.1',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('7.0A',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('7.0',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('6.1',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK61VCSetupScripts,
),
WindowsSDK('6.0A',
sanity_check_file=r'include\windows.h',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = preSDK61VCSetupScripts,
),
WindowsSDK('6.0',
sanity_check_file=r'bin\gacutil.exe',
include_subdir='include',
lib_subdir='lib',
vc_setup_scripts = preSDK61VCSetupScripts,
),
PlatformSDK('2003R2',
sanity_check_file=r'SetEnv.Cmd',
uuid="D2FF9F89-8AA2-4373-8A31-C838BF4DBBE1",
vc_setup_scripts = preSDK61VCSetupScripts,
),
PlatformSDK('2003R1',
sanity_check_file=r'SetEnv.Cmd',
uuid="8F9E5EF3-A9A5-491B-A889-C58EFFECE8B3",
vc_setup_scripts = preSDK61VCSetupScripts,
),
]
SupportedSDKMap = {}
for sdk in SupportedSDKList:
SupportedSDKMap[sdk.version] = sdk
# Finding installed SDKs isn't cheap, because it goes not only to the
# registry but also to the disk to sanity-check that there is, in fact,
# an SDK installed there and that the registry entry isn't just stale.
# Find this information once, when requested, and cache it.
InstalledSDKList = None
InstalledSDKMap = None
def get_installed_sdks():
global InstalledSDKList
global InstalledSDKMap
debug('sdk.py:get_installed_sdks()')
if InstalledSDKList is None:
InstalledSDKList = []
InstalledSDKMap = {}
for sdk in SupportedSDKList:
debug('MSCommon/sdk.py: trying to find SDK %s' % sdk.version)
if sdk.get_sdk_dir():
debug('MSCommon/sdk.py:found SDK %s' % sdk.version)
InstalledSDKList.append(sdk)
InstalledSDKMap[sdk.version] = sdk
return InstalledSDKList
# We may be asked to update multiple construction environments with
# SDK information. When doing this, we check on-disk for whether
# the SDK has 'mfc' and 'atl' subdirectories. Since going to disk
# is expensive, cache results by directory.
SDKEnvironmentUpdates = {}
def set_sdk_by_directory(env, sdk_dir):
global SDKEnvironmentUpdates
debug('set_sdk_by_directory: Using dir:%s'%sdk_dir)
try:
env_tuple_list = SDKEnvironmentUpdates[sdk_dir]
except KeyError:
env_tuple_list = []
SDKEnvironmentUpdates[sdk_dir] = env_tuple_list
include_path = os.path.join(sdk_dir, 'include')
mfc_path = os.path.join(include_path, 'mfc')
atl_path = os.path.join(include_path, 'atl')
if os.path.exists(mfc_path):
env_tuple_list.append(('INCLUDE', mfc_path))
if os.path.exists(atl_path):
env_tuple_list.append(('INCLUDE', atl_path))
env_tuple_list.append(('INCLUDE', include_path))
env_tuple_list.append(('LIB', os.path.join(sdk_dir, 'lib')))
env_tuple_list.append(('LIBPATH', os.path.join(sdk_dir, 'lib')))
env_tuple_list.append(('PATH', os.path.join(sdk_dir, 'bin')))
for variable, directory in env_tuple_list:
env.PrependENVPath(variable, directory)
# TODO(sgk): currently unused; remove?
def get_cur_sdk_dir_from_reg():
"""Try to find the platform sdk directory from the registry.
Return None if failed or the directory does not exist"""
if not SCons.Util.can_read_reg:
debug('SCons cannot read registry')
return None
try:
val = common.read_reg(_CURINSTALLED_SDK_HKEY_ROOT)
debug("Found current sdk dir in registry: %s" % val)
except WindowsError, e:
debug("Did not find current sdk in registry")
return None
if not os.path.exists(val):
debug("Current sdk dir %s not on fs" % val)
return None
return val
def get_sdk_by_version(mssdk):
if mssdk not in SupportedSDKMap:
msg = "SDK version %s is not supported" % repr(mssdk)
raise SCons.Errors.UserError(msg)
get_installed_sdks()
return InstalledSDKMap.get(mssdk)
def get_default_sdk():
"""Set up the default Platform/Windows SDK."""
get_installed_sdks()
if not InstalledSDKList:
return None
return InstalledSDKList[0]
def mssdk_setup_env(env):
debug('sdk.py:mssdk_setup_env()')
if 'MSSDK_DIR' in env:
sdk_dir = env['MSSDK_DIR']
if sdk_dir is None:
return
sdk_dir = env.subst(sdk_dir)
debug('sdk.py:mssdk_setup_env: Using MSSDK_DIR:%s'%sdk_dir)
elif 'MSSDK_VERSION' in env:
sdk_version = env['MSSDK_VERSION']
if sdk_version is None:
msg = "SDK version is specified as None"
raise SCons.Errors.UserError(msg)
sdk_version = env.subst(sdk_version)
mssdk = get_sdk_by_version(sdk_version)
if mssdk is None:
msg = "SDK version %s is not installed" % sdk_version
raise SCons.Errors.UserError(msg)
sdk_dir = mssdk.get_sdk_dir()
debug('sdk.py:mssdk_setup_env: Using MSSDK_VERSION:%s'%sdk_dir)
elif 'MSVS_VERSION' in env:
msvs_version = env['MSVS_VERSION']
debug('sdk.py:mssdk_setup_env:Getting MSVS_VERSION from env:%s'%msvs_version)
if msvs_version is None:
debug('sdk.py:mssdk_setup_env thinks msvs_version is None')
return
msvs_version = env.subst(msvs_version)
import vs
msvs = vs.get_vs_by_version(msvs_version)
debug('sdk.py:mssdk_setup_env:msvs is :%s'%msvs)
if not msvs:
            debug('sdk.py:mssdk_setup_env: no VS version detected, bailing out:%s'%msvs)
return
sdk_version = msvs.sdk_version
debug('sdk.py:msvs.sdk_version is %s'%sdk_version)
if not sdk_version:
return
mssdk = get_sdk_by_version(sdk_version)
if not mssdk:
mssdk = get_default_sdk()
if not mssdk:
return
sdk_dir = mssdk.get_sdk_dir()
debug('sdk.py:mssdk_setup_env: Using MSVS_VERSION:%s'%sdk_dir)
else:
mssdk = get_default_sdk()
if not mssdk:
return
sdk_dir = mssdk.get_sdk_dir()
debug('sdk.py:mssdk_setup_env: not using any env values. sdk_dir:%s'%sdk_dir)
set_sdk_by_directory(env, sdk_dir)
#print "No MSVS_VERSION: this is likely to be a bug"
def mssdk_exists(version=None):
sdks = get_installed_sdks()
if version is None:
return len(sdks) > 0
return version in sdks
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
stefanklug/mapnik
|
scons/scons-local-2.3.6/SCons/Tool/MSCommon/sdk.py
|
Python
|
lgpl-2.1
| 14,900
|
"""SCons.Tool.aixc++
Tool-specific initialization for IBM xlC / Visual Age C++ compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
#forward proxy to the preferred cxx version
from SCons.Tool.aixcxx import *
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mapnik/mapnik
|
scons/scons-local-4.1.0/SCons/Tool/aixc++.py
|
Python
|
lgpl-2.1
| 1,578
|
"""
Graphical user interface functionalities for the
SampleResource Aggregate Manager.
@date: Jun 12, 2013
@author: CarolinaFernandez
"""
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404
from expedient.clearinghouse.aggregate.models import Aggregate
from expedient.clearinghouse.slice.models import Slice
from expedient.common.messaging.models import DatedMessage
from expedient.common.utils.plugins.plugincommunicator import *
from expedient.common.utils.plugins.resources.link import Link
from expedient.common.utils.plugins.resources.node import Node
from expedient.common.utils.views import generic_crud
from sample_resource.controller.resource import SampleResource as SampleResourceController
from sample_resource.forms.SampleResource import SampleResource as SampleResourceModelForm
from sample_resource.models import SampleResource as SampleResourceModel,\
SampleResourceAggregate as SampleResourceAggregateModel
import copy
import logging
import xmlrpclib
def create_resource(request, slice_id, agg_id):
"""Show a page that allows user to add a SampleResource to the aggregate."""
if request.method == "POST":
        # Show an error message when the aggregate is unreachable, disable SampleResource creation and go back to the slice detail page
agg = Aggregate.objects.get(id = agg_id)
if agg.check_status() == False:
DatedMessage.objects.post_message_to_user(
"SampleResource Aggregate '%s' is not available" % agg.name,
request.user, msg_type=DatedMessage.TYPE_ERROR,)
return HttpResponseRedirect(reverse("slice_detail", args=[slice_id]))
if 'create_resource' in request.POST:
return HttpResponseRedirect(reverse("sample_resource_resource_crud", args=[slice_id, agg_id]))
else:
return HttpResponseRedirect(reverse("slice_detail", args=[slice_id]))
def resource_crud(request, slice_id, agg_id, resource_id = None):
"""
Show a page that allows user to create/edit SampleResource's to the Aggregate.
"""
slice = get_object_or_404(Slice, id = slice_id)
aggregate = Aggregate.objects.get(id = agg_id)
error_crud = ""
def pre_save(instance, created):
"""
Fills SampleResource instance prior to its saving.
Used within the scope of the generic_crud method.
"""
instance = SampleResourceController.fill(instance, slice, agg_id, resource_id)
try:
return generic_crud(request, obj_id=resource_id, model=SampleResourceModel,
form_class=SampleResourceModelForm,
template="sample_resource_resource_crud.html",
redirect=lambda inst: reverse("slice_detail", args=[slice_id]),
extra_context={"agg": aggregate, "slice": slice, "exception": error_crud, "breadcrumbs": (
("Home", reverse("home")),
("Project %s" % slice.project.name, reverse("project_detail", args=[slice.project.id])),
("Slice %s" % slice.name, reverse("slice_detail", args=[slice_id])),
("%s SampleResource" % "Update" if resource_id else "Create", reverse("sample_resource_resource_crud", args=[slice_id, agg_id])),)
}, extra_form_params={}, template_object_name="object", pre_save=pre_save,
post_save=None, success_msg=None)
except ValidationError as e:
# Django exception message handling is different to Python's...
error_crud = ";".join(e.messages)
except Exception as e:
print "[WARNING] Could not create resource in plugin 'sample_resource'. Details: %s" % str(e)
DatedMessage.objects.post_message_to_user(
"SampleResource might have been created, but some problem ocurred: %s" % str(e),
request.user, msg_type=DatedMessage.TYPE_ERROR)
return HttpResponseRedirect(reverse("slice_detail", args=[slice_id]))
def manage_resource(request, resource_id, action_type):
"""
Manages the actions executed over SampleResource's.
"""
if action_type == "delete":
SampleResourceController.delete(resource_id)
# Go to manage resources again
return HttpResponse("")
###
# Topology to show in the Expedient
#
def get_sr_list(slice):
return SampleResourceModel.objects.filter(slice_id = slice.uuid)
def get_sr_aggregates(slice):
sr_aggs = []
try:
sr_aggs = slice.aggregates.filter(leaf_name=SampleResourceAggregateModel.__name__.lower())
except:
pass
return sr_aggs
def get_node_description(node):
description = "<strong>Sample Resource: " + node.name + "</strong><br/><br/>"
description += "• Temperature: %s (°%s)" % (str(node.get_temperature()), str(node.get_temperature_scale()))
connections = ""
node_connections = node.get_connections()
for i, connection in enumerate(node_connections):
connections += connection.name
if i < len(node_connections)-1:
connections += ", "
description += "<br/>• Connected to: %s" % str(connections)
return description
def get_nodes_links(slice, chosen_group=None):
nodes = []
links = []
sr_aggs = get_sr_aggregates(slice)
# Getting image for the nodes
    # FIXME: avoid asking the user for the complete name of the method here! they should NOT know it
try:
image_url = reverse('img_media_sample_resource', args=("sensor-tiny.png",))
except:
image_url = 'sensor-tiny.png'
# For every SampleResource AM
for i, sr_agg in enumerate(sr_aggs):
sr_agg = sr_agg.sampleresourceaggregate
# Iterates over every SampleResource contained within the slice
for sr in sr_agg.get_resources():
sr = sr.sampleresource
nodes.append(Node(
# Users shall not be left the choice to choose group/island; otherwise collision may arise
name = sr.name, value = sr.id, aggregate = sr.aggregate, type = "Sample resource",
description = get_node_description(sr), image = image_url)
)
for connection in sr.get_connections():
# Two-ways link
links.append(
Link(
target = str(sr.id), source = str(connection.id),
value = "rsc_id_%s-rsc_id_%s" % (connection.id, sr.id)
),
)
links.append(
Link(
target = str(sr.id), source = str(connection.id),
value = "rsc_id_%s-rsc_id_%s" % (sr.id, connection.id)
),
)
return [nodes, links]
#from expedient.common.utils.plugins.plugininterface import PluginInterface
#
#class Plugin(PluginInterface):
# @staticmethod
def get_ui_data(slice):
"""
Hook method. Use this very same name so Expedient can get the resources for every plugin.
"""
ui_context = dict()
try:
ui_context['sr_list'] = get_sr_list(slice)
ui_context['sr_aggs'] = get_sr_aggregates(slice)
ui_context['nodes'], ui_context['links'] = get_nodes_links(slice)
except Exception as e:
print "[ERROR] Problem loading UI data for plugin 'sample_resource'. Details: %s" % str(e)
return ui_context
|
dana-i2cat/felix
|
expedient/doc/plugins/samples/plugin/sample_resource/controller/GUIdispatcher.py
|
Python
|
apache-2.0
| 7,485
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone import catalog
from keystone.common import sql
from keystone import config
from keystone import exception
from keystone import identity
from keystone import policy
from keystone import test
from keystone import token
from keystone import trust
import default_fixtures
import test_backend
CONF = config.CONF
DEFAULT_DOMAIN_ID = CONF.identity.default_domain_id
class SqlTests(test.TestCase):
def setUp(self):
super(SqlTests, self).setUp()
self.config([test.etcdir('keystone.conf.sample'),
test.testsdir('test_overrides.conf'),
test.testsdir('backend_sql.conf')])
# initialize managers and override drivers
self.catalog_man = catalog.Manager()
self.identity_man = identity.Manager()
self.token_man = token.Manager()
self.trust_man = trust.Manager()
self.policy_man = policy.Manager()
# create shortcut references to each driver
self.catalog_api = self.catalog_man.driver
self.identity_api = self.identity_man.driver
self.token_api = self.token_man.driver
self.policy_api = self.policy_man.driver
self.trust_api = self.trust_man.driver
# populate the engine with tables & fixtures
self.load_fixtures(default_fixtures)
#defaulted by the data load
self.user_foo['enabled'] = True
def tearDown(self):
sql.set_global_engine(None)
super(SqlTests, self).tearDown()
class SqlIdentity(SqlTests, test_backend.IdentityTests):
def test_delete_user_with_project_association(self):
user = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex}
self.identity_man.create_user({}, user['id'], user)
self.identity_api.add_user_to_project(self.tenant_bar['id'],
user['id'])
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_projects_for_user,
user['id'])
def test_create_null_user_name(self):
user = {'id': uuid.uuid4().hex,
'name': None,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex}
self.assertRaises(exception.ValidationError,
self.identity_man.create_user, {},
user['id'],
user)
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
user['id'])
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user_by_name,
user['name'],
DEFAULT_DOMAIN_ID)
def test_create_null_project_name(self):
tenant = {'id': uuid.uuid4().hex,
'name': None,
'domain_id': DEFAULT_DOMAIN_ID}
self.assertRaises(exception.ValidationError,
self.identity_man.create_project, {},
tenant['id'],
tenant)
self.assertRaises(exception.ProjectNotFound,
self.identity_api.get_project,
tenant['id'])
self.assertRaises(exception.ProjectNotFound,
self.identity_api.get_project_by_name,
tenant['name'],
DEFAULT_DOMAIN_ID)
def test_create_null_role_name(self):
role = {'id': uuid.uuid4().hex,
'name': None}
self.assertRaises(exception.Conflict,
self.identity_api.create_role,
role['id'],
role)
self.assertRaises(exception.RoleNotFound,
self.identity_api.get_role,
role['id'])
def test_delete_project_with_user_association(self):
user = {'id': 'fake',
'name': 'fakeuser',
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
self.identity_man.create_user({}, 'fake', user)
self.identity_api.add_user_to_project(self.tenant_bar['id'],
user['id'])
self.identity_api.delete_project(self.tenant_bar['id'])
tenants = self.identity_api.get_projects_for_user(user['id'])
self.assertEquals(tenants, [])
def test_delete_user_with_metadata(self):
user = {'id': 'fake',
'name': 'fakeuser',
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
self.identity_man.create_user({}, 'fake', user)
self.identity_api.create_metadata(user['id'],
self.tenant_bar['id'],
{'extra': 'extra'})
self.identity_api.delete_user(user['id'])
self.assertRaises(exception.MetadataNotFound,
self.identity_api.get_metadata,
user['id'],
self.tenant_bar['id'])
def test_delete_project_with_metadata(self):
user = {'id': 'fake',
'name': 'fakeuser',
'domain_id': DEFAULT_DOMAIN_ID,
'password': 'passwd'}
self.identity_man.create_user({}, 'fake', user)
self.identity_api.create_metadata(user['id'],
self.tenant_bar['id'],
{'extra': 'extra'})
self.identity_api.delete_project(self.tenant_bar['id'])
self.assertRaises(exception.MetadataNotFound,
self.identity_api.get_metadata,
user['id'],
self.tenant_bar['id'])
def test_update_project_returns_extra(self):
"""This tests for backwards-compatibility with an essex/folsom bug.
Non-indexed attributes were returned in an 'extra' attribute, instead
of on the entity itself; for consistency and backwards compatibility,
those attributes should be included twice.
This behavior is specific to the SQL driver.
"""
tenant_id = uuid.uuid4().hex
arbitrary_key = uuid.uuid4().hex
arbitrary_value = uuid.uuid4().hex
tenant = {
'id': tenant_id,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
arbitrary_key: arbitrary_value}
ref = self.identity_man.create_project({}, tenant_id, tenant)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertIsNone(ref.get('extra'))
tenant['name'] = uuid.uuid4().hex
ref = self.identity_api.update_project(tenant_id, tenant)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
def test_update_user_returns_extra(self):
"""This tests for backwards-compatibility with an essex/folsom bug.
Non-indexed attributes were returned in an 'extra' attribute, instead
of on the entity itself; for consistency and backwards compatibility,
those attributes should be included twice.
This behavior is specific to the SQL driver.
"""
user_id = uuid.uuid4().hex
arbitrary_key = uuid.uuid4().hex
arbitrary_value = uuid.uuid4().hex
user = {
'id': user_id,
'name': uuid.uuid4().hex,
'domain_id': DEFAULT_DOMAIN_ID,
'password': uuid.uuid4().hex,
arbitrary_key: arbitrary_value}
ref = self.identity_man.create_user({}, user_id, user)
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertIsNone(ref.get('password'))
self.assertIsNone(ref.get('extra'))
user['name'] = uuid.uuid4().hex
user['password'] = uuid.uuid4().hex
ref = self.identity_api.update_user(user_id, user)
self.assertIsNone(ref.get('password'))
self.assertIsNone(ref['extra'].get('password'))
self.assertEqual(arbitrary_value, ref[arbitrary_key])
self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key])
class SqlTrust(SqlTests, test_backend.TrustTests):
pass
class SqlToken(SqlTests, test_backend.TokenTests):
pass
class SqlCatalog(SqlTests, test_backend.CatalogTests):
def test_malformed_catalog_throws_error(self):
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service.copy())
malformed_url = "http://192.168.1.104:$(compute_port)s/v2/$(tenant)s"
endpoint = {
'id': uuid.uuid4().hex,
'region': uuid.uuid4().hex,
'service_id': service['id'],
'interface': 'public',
'url': malformed_url,
}
self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
with self.assertRaises(exception.MalformedEndpoint):
self.catalog_api.get_catalog('fake-user', 'fake-tenant')
def test_get_catalog_with_empty_public_url(self):
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service.copy())
endpoint = {
'id': uuid.uuid4().hex,
'region': uuid.uuid4().hex,
'interface': 'public',
'url': '',
'service_id': service['id'],
}
self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
catalog = self.catalog_api.get_catalog('user', 'tenant')
catalog_endpoint = catalog[endpoint['region']][service['type']]
self.assertEqual(catalog_endpoint['name'], service['name'])
self.assertEqual(catalog_endpoint['id'], endpoint['id'])
self.assertEqual(catalog_endpoint['publicURL'], '')
self.assertIsNone(catalog_endpoint.get('adminURL'))
self.assertIsNone(catalog_endpoint.get('internalURL'))
def test_create_endpoint_400(self):
service = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
self.catalog_api.create_service(service['id'], service.copy())
endpoint = {
'id': uuid.uuid4().hex,
'region': "0" * 256,
'service_id': service['id'],
'interface': 'public',
'url': uuid.uuid4().hex,
}
with self.assertRaises(exception.StringLengthExceeded):
self.catalog_api.create_endpoint(endpoint['id'], endpoint.copy())
class SqlPolicy(SqlTests, test_backend.PolicyTests):
pass
|
cbrucks/Federated_Keystone
|
tests/test_backend_sql.py
|
Python
|
apache-2.0
| 11,834
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
import sqlalchemy as sa
"""Add standard attribute table
Revision ID: 32e5974ada25
Revises: 13cfb89f881a
Create Date: 2015-09-10 00:22:47.618593
"""
# revision identifiers, used by Alembic.
revision = '32e5974ada25'
down_revision = '13cfb89f881a'
TABLES = ('ports', 'networks', 'subnets', 'subnetpools', 'securitygroups',
'floatingips', 'routers', 'securitygrouprules')
def upgrade():
op.create_table(
'standardattributes',
sa.Column('id', sa.BigInteger(), autoincrement=True),
sa.Column('resource_type', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('id')
)
for table in TABLES:
op.add_column(table, sa.Column('standard_attr_id', sa.BigInteger(),
nullable=True))
|
noironetworks/neutron
|
neutron/db/migration/alembic_migrations/versions/mitaka/expand/32e5974ada25_add_neutron_resources_table.py
|
Python
|
apache-2.0
| 1,390
|
#*******************************************************************************
# Copyright (c) 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
import requests
import sys
import os
import json
from helpers.dbutils import CloudantDbUtils
from helpers.acmeair_utils import AcmeAirUtils
import conftest
# get the cloudant credentials from pytest config file
test_properties = conftest.test_properties()
class DataLoader:
"""
Test data loader related functions
"""
def load_AcmeData(self, num_of_cust):
"""
Reset databases and use the AcmeAir database loader to populate initial customer,
flight and airportmapping data. Does NOT generate user data like bookings.
"""
print ("num_of_cust: ", num_of_cust)
acmeair = AcmeAirUtils()
try:
if acmeair.is_acmeair_running() != 0:
raise RuntimeError("""
AcmeAir is already running which may cause unexpected results when
resetting databases. Please shut down the app and try again.
""")
else:
cloudantUtils = CloudantDbUtils(test_properties)
cloudantUtils.reset_databases()
acmeair.start_acmeair()
acmeair.load_data(num_of_cust)
finally:
acmeair.stop_acmeair()
def remove_AcmeDb(self, num_of_cust):
"""
Drop all AcmeAir databases
"""
acmeair = AcmeAirUtils()
if acmeair.is_acmeair_running() != 0:
acmeair.stop_acmeair()
cloudantUtils = CloudantDbUtils(test_properties)
cloudantUtils.drop_all_databases()
def load_SpecCharValuePredicateData(self):
"""
Create booking data needed to test SpecCharValuePredicate
"""
try:
acmeair = AcmeAirUtils()
acmeair.start_acmeair()
# book flights AA93 and AA330
flight1 = "AA93"
flight2 = "AA330"
# Step#1 - need to find the flights generated _id required for booking
flight1_id = acmeair.get_flightId_by_number(flight1)
print ("{} id = {}".format(flight1, flight1_id))
flight2_id = acmeair.get_flightId_by_number(flight2)
print ("{} id = {}".format(flight2, flight2_id))
            # Step#2 - add the booking
acmeair.book_flights("uid0@email.com", flight1, flight2)
finally:
acmeair.stop_acmeair()
if __name__ =='__main__':
"""
Utility to create test databases and load data
"""
import argparse
parser = argparse.ArgumentParser(description="Utility to load AcmeAir data required for python spark-cloudant tests")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-cleanup', action='store_true', help='Drop all test databases')
group.add_argument('-load', help='Reset and Load databases with the given # of users. -load 0 to just recreate databases and indexes.', type=int)
args = parser.parse_args()
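    # Illustrative invocations (the exact module path may differ in your checkout):
    #   python dataload.py -load 100   # reset the databases and load 100 customers
    #   python dataload.py -load 0     # just recreate databases and indexes
    #   python dataload.py -cleanup    # drop all test databases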
dataloader = DataLoader()
if args.load is not None:
if args.load == 0:
cloudantUtils = CloudantDbUtils(test_properties)
cloudantUtils.reset_databases()
else:
dataloader.load_AcmeData(args.load)
dataloader.load_SpecCharValuePredicateData()
elif args.cleanup:
cloudantUtils = CloudantDbUtils(test_properties)
cloudantUtils.drop_all_databases()
|
snowch/spark-cloudant
|
test/helpers/dataload.py
|
Python
|
apache-2.0
| 3,656
|
# -*- coding: utf-8 -*-
from rest_framework import status as http_status
import itertools
from flask import request
from framework import status
from framework.exceptions import HTTPError
from framework.flask import redirect # VOL-aware redirect
from framework.auth.decorators import must_be_signed
from website.archiver import ARCHIVER_SUCCESS, ARCHIVER_FAILURE
from addons.base.views import DOWNLOAD_ACTIONS
from website import settings
from osf.exceptions import NodeStateError
from website.project.decorators import (
must_be_valid_project, must_be_contributor_or_public,
must_have_permission, must_be_contributor_and_not_group_member,
must_not_be_registration, must_be_registration,
must_not_be_retracted_registration
)
from osf import features
from osf.models import Identifier, RegistrationSchema
from website.project.utils import serialize_node
from osf.utils.permissions import ADMIN
from website import language
from website.ember_osf_web.decorators import ember_flag_is_active
from website.project import signals as project_signals
from website.project.metadata.schemas import _id_to_name
from website import util
from website.project.metadata.utils import serialize_meta_schema
from website.project.model import has_anonymous_link
from website.archiver.decorators import fail_archive_on_error
from .node import _view_project
from api.waffle.utils import flag_is_active
@must_be_valid_project
@must_not_be_retracted_registration
@must_be_contributor_or_public
def node_register_page(auth, node, **kwargs):
"""Display the registration metadata for a registration.
:return: serialized Node
"""
if node.is_registration:
return serialize_node(node, auth)
else:
status.push_status_message(
'You have been redirected to the project\'s registrations page. From here you can initiate a new Draft Registration to complete the registration process',
trust=False,
id='redirected_to_registrations',
)
return redirect(node.web_url_for('node_registrations', view='draft', _guid=True))
@must_be_valid_project
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
def node_registration_retraction_redirect(auth, node, **kwargs):
return redirect(node.web_url_for('node_registration_retraction_get', _guid=True))
@must_be_valid_project
@must_not_be_retracted_registration
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
def node_registration_retraction_get(auth, node, **kwargs):
"""Prepares node object for registration retraction page.
:return: serialized Node to be retracted
:raises: 400: BAD_REQUEST if registration already pending retraction
"""
if not node.is_registration:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'Withdrawal of non-registrations is not permitted.'
})
if node.is_pending_retraction:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'This registration is already pending withdrawal.'
})
return serialize_node(node, auth, primary=True)
@must_be_valid_project
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
def node_registration_retraction_post(auth, node, **kwargs):
"""Handles retraction of public registrations
:param auth: Authentication object for User
:return: Redirect URL for successful POST
"""
if node.is_pending_retraction:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'This registration is already pending withdrawal'
})
if not node.is_registration:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'Withdrawal of non-registrations is not permitted.'
})
if node.root_id != node.id:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data={
'message_short': 'Invalid Request',
'message_long': 'Withdrawal of non-parent registrations is not permitted.'
})
data = request.get_json()
try:
node.retract_registration(auth.user, data.get('justification', None))
node.save()
node.retraction.ask(node.get_active_contributors_recursive(unique_users=True))
except NodeStateError as err:
raise HTTPError(http_status.HTTP_403_FORBIDDEN, data=dict(message_long=str(err)))
return {'redirectUrl': node.web_url_for('view_project')}
@must_be_valid_project
@must_not_be_retracted_registration
@must_be_contributor_or_public
@ember_flag_is_active(features.EMBER_REGISTRATION_FORM_DETAIL)
def node_register_template_page(auth, node, metaschema_id, **kwargs):
if flag_is_active(request, features.EMBER_REGISTRIES_DETAIL_PAGE):
# Registration meta page obviated during redesign
return redirect(node.url)
if node.is_registration and bool(node.registered_schema):
try:
meta_schema = RegistrationSchema.objects.get(_id=metaschema_id)
except RegistrationSchema.DoesNotExist:
            # backwards compatibility for old urls, lookup by name
meta_schema = RegistrationSchema.objects.filter(name=_id_to_name(metaschema_id)).order_by('-schema_version').first()
if not meta_schema:
raise HTTPError(http_status.HTTP_404_NOT_FOUND, data={
'message_short': 'Invalid schema name',
'message_long': 'No registration schema with that name could be found.'
})
ret = _view_project(node, auth, primary=True)
my_meta = serialize_meta_schema(meta_schema)
if has_anonymous_link(node, auth):
for indx, schema_page in enumerate(my_meta['schema']['pages']):
for idx, schema_question in enumerate(schema_page['questions']):
if schema_question['title'] in settings.ANONYMIZED_TITLES:
del my_meta['schema']['pages'][indx]['questions'][idx]
ret['node']['registered_schema'] = serialize_meta_schema(meta_schema)
return ret
else:
status.push_status_message(
'You have been redirected to the project\'s registrations page. From here you can initiate a new Draft Registration to complete the registration process',
trust=False,
id='redirected_to_registrations',
)
return redirect(node.web_url_for('node_registrations', view=kwargs.get('template'), _guid=True))
@must_be_valid_project # returns project
@must_have_permission(ADMIN)
@must_be_contributor_and_not_group_member
@must_not_be_registration
def project_before_register(auth, node, **kwargs):
"""Returns prompt informing user that addons, if any, won't be registered."""
# TODO: Avoid generating HTML code in Python; all HTML should be in display layer
messages = {
'full': {
'addons': set(),
'message': 'The content and version history of <strong>{0}</strong> will be copied to the registration.',
},
'partial': {
'addons': set(),
'message': 'The current version of the content in <strong>{0}</strong> will be copied to the registration, but version history will be lost.'
},
'none': {
'addons': set(),
'message': 'The contents of <strong>{0}</strong> cannot be registered at this time, and will not be included as part of this registration.',
},
}
errors = {}
addon_set = [n.get_addons() for n in itertools.chain([node], node.get_descendants_recursive(primary_only=True))]
for addon in itertools.chain(*addon_set):
if not addon.complete:
continue
archive_errors = getattr(addon, 'archive_errors', None)
error = None
if archive_errors:
error = archive_errors()
if error:
errors[addon.config.short_name] = error
continue
name = addon.config.short_name
if name in settings.ADDONS_ARCHIVABLE:
messages[settings.ADDONS_ARCHIVABLE[name]]['addons'].add(addon.config.full_name)
else:
messages['none']['addons'].add(addon.config.full_name)
error_messages = list(errors.values())
prompts = [
m['message'].format(util.conjunct(m['addons']))
for m in messages.values() if m['addons']
]
if node.has_pointers_recursive:
prompts.append(
language.BEFORE_REGISTER_HAS_POINTERS.format(
category=node.project_or_component
)
)
return {
'prompts': prompts,
'errors': error_messages
}
def osf_admin_change_status_identifier(node):
if node.get_identifier_value('doi'):
node.request_identifier_update(category='doi')
def get_referent_by_identifier(category, value):
"""Look up identifier by `category` and `value` and redirect to its referent
if found.
"""
try:
identifier = Identifier.objects.get(category=category, value=value)
except Identifier.DoesNotExist:
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
if identifier.referent.url:
return redirect(identifier.referent.url)
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
@fail_archive_on_error
@must_be_signed
@must_be_registration
def registration_callbacks(node, payload, *args, **kwargs):
if payload.get('action', None) in DOWNLOAD_ACTIONS:
return {'status': 'success'}
errors = payload.get('errors')
src_provider = payload['source']['provider']
if errors:
node.archive_job.update_target(
src_provider,
ARCHIVER_FAILURE,
errors=errors,
)
else:
        # Dataverse requires two separate targets, one
# for draft files and one for published files
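        # (Illustration, assuming a destination name like 'Dataverse (Draft)':
        # the parenthesised suffix is extracted below, giving the target 'dataverse-Draft'.)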
if src_provider == 'dataverse':
src_provider += '-' + (payload['destination']['name'].split(' ')[-1].lstrip('(').rstrip(')').strip())
node.archive_job.update_target(
src_provider,
ARCHIVER_SUCCESS,
)
project_signals.archive_callback.send(node)
|
mfraezz/osf.io
|
website/project/views/register.py
|
Python
|
apache-2.0
| 10,365
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
from src_util import *
def commandES31InitStatement (command):
memberName = getFunctionMemberName(command.name)
return "dst->%s\t= src.%s;" % (memberName, memberName)
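# Illustrative note (not part of the original script): for an ES 3.1 entry point
# such as glDispatchCompute, the statement built above would look roughly like
#   dst->dispatchCompute = src.dispatchCompute;
# with the exact member name decided by getFunctionMemberName() and the columns
# re-aligned later by genCommandList(align=True).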
def genES31WrapperFuncs (registry):
iface = getInterface(registry, api='gles2', version="3.1")
genCommandList(iface, commandES31InitStatement,
directory = OPENGL_DIR,
filename = "gluES3PlusWrapperFuncs.inl",
align = True)
if __name__ == "__main__":
genES31WrapperFuncs(getGLRegistry())
|
maurossi/deqp
|
scripts/opengl/gen_es31_wrapper.py
|
Python
|
apache-2.0
| 1,342
|
"""A high-speed, production ready, thread pooled, generic HTTP server.
Simplest example on how to use this module directly
(without using CherryPy's application machinery)::
from cherrypy import wsgiserver
def my_crazy_app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type','text/plain')]
start_response(status, response_headers)
return ['Hello world!']
server = wsgiserver.CherryPyWSGIServer(
('0.0.0.0', 8070), my_crazy_app,
server_name='www.cherrypy.example')
server.start()
The CherryPy WSGI server can serve as many WSGI applications
as you want in one instance by using a WSGIPathInfoDispatcher::
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
This won't call the CherryPy engine (application side) at all, only the
HTTP server, which is independent from the rest of CherryPy. Don't
let the name "CherryPyWSGIServer" throw you; the name merely reflects
its origin, not its coupling.
For those of you wanting to understand internals of this module, here's the
basic call flow. The server's listening thread runs a very tight loop,
sticking incoming connections onto a Queue::
server = CherryPyWSGIServer(...)
server.start()
while True:
tick()
# This blocks until a request comes in:
child = socket.accept()
conn = HTTPConnection(child, ...)
server.requests.put(conn)
Worker threads are kept in a pool and poll the Queue, popping off and then
handling each connection in turn. Each connection can consist of an arbitrary
number of requests and their responses, so we run a nested loop::
while True:
conn = server.requests.get()
conn.communicate()
-> while True:
req = HTTPRequest(...)
req.parse_request()
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
req.rfile.readline()
read_headers(req.rfile, req.inheaders)
req.respond()
-> response = app(...)
try:
for chunk in response:
if chunk:
req.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
if req.close_connection:
return
"""
__all__ = ['HTTPRequest', 'HTTPConnection', 'HTTPServer',
'SizeCheckWrapper', 'KnownLengthRFile', 'ChunkedRFile',
'CP_makefile',
'MaxSizeExceeded', 'NoSSLError', 'FatalSSLAlert',
'WorkerThread', 'ThreadPool', 'SSLAdapter',
'CherryPyWSGIServer',
'Gateway', 'WSGIGateway', 'WSGIGateway_10', 'WSGIGateway_u0',
'WSGIPathInfoDispatcher', 'get_ssl_adapter_class']
import os
try:
import queue
except:
import Queue as queue
import re
import email.utils
import socket
import sys
if 'win' in sys.platform and hasattr(socket, "AF_INET6"):
if not hasattr(socket, 'IPPROTO_IPV6'):
socket.IPPROTO_IPV6 = 41
if not hasattr(socket, 'IPV6_V6ONLY'):
socket.IPV6_V6ONLY = 27
if sys.version_info < (3,1):
import io
else:
import _pyio as io
DEFAULT_BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE
import threading
import time
from traceback import format_exc
if sys.version_info >= (3, 0):
bytestr = bytes
unicodestr = str
basestring = (bytes, str)
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given encoding."""
# In Python 3, the native string type is unicode
return n.encode(encoding)
else:
bytestr = str
unicodestr = unicode
basestring = basestring
def ntob(n, encoding='ISO-8859-1'):
"""Return the given native string as a byte string in the given encoding."""
# In Python 2, the native string type is bytes. Assume it's already
# in the given encoding, which for ISO-8859-1 is almost always what
# was intended.
return n
LF = ntob('\n')
CRLF = ntob('\r\n')
TAB = ntob('\t')
SPACE = ntob(' ')
COLON = ntob(':')
SEMICOLON = ntob(';')
EMPTY = ntob('')
NUMBER_SIGN = ntob('#')
QUESTION_MARK = ntob('?')
ASTERISK = ntob('*')
FORWARD_SLASH = ntob('/')
quoted_slash = re.compile(ntob("(?i)%2F"))
import errno
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return list(dict.fromkeys(nums).keys())
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
socket_errors_to_ignore = plat_specific_errors(
"EPIPE",
"EBADF", "WSAEBADF",
"ENOTSOCK", "WSAENOTSOCK",
"ETIMEDOUT", "WSAETIMEDOUT",
"ECONNREFUSED", "WSAECONNREFUSED",
"ECONNRESET", "WSAECONNRESET",
"ECONNABORTED", "WSAECONNABORTED",
"ENETRESET", "WSAENETRESET",
"EHOSTDOWN", "EHOSTUNREACH",
)
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")
socket_errors_nonblocking = plat_specific_errors(
'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
comma_separated_headers = [ntob(h) for h in
['Accept', 'Accept-Charset', 'Accept-Encoding',
'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
'WWW-Authenticate']]
import logging
if not hasattr(logging, 'statistics'): logging.statistics = {}
def read_headers(rfile, hdict=None):
"""Read headers from the given stream into the given header dict.
If hdict is None, a new header dict is created. Returns the populated
header dict.
Headers which are repeated are folded together using a comma if their
specification so dictates.
This function raises ValueError when the read bytes violate the HTTP spec.
You should probably return "400 Bad Request" if this happens.
"""
if hdict is None:
hdict = {}
while True:
line = rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
if line[0] in (SPACE, TAB):
# It's a continuation line.
v = line.strip()
else:
try:
k, v = line.split(COLON, 1)
except ValueError:
raise ValueError("Illegal header line.")
# TODO: what about TE and WWW-Authenticate?
k = k.strip().title()
v = v.strip()
hname = k
if k in comma_separated_headers:
existing = hdict.get(hname)
if existing:
v = b", ".join((existing, v))
hdict[hname] = v
return hdict
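# Hedged usage sketch, not part of the original module: shows how read_headers
# folds repeated comma-separated headers (here 'Accept') into a single value.
# The header bytes below are illustrative only.
def _example_read_headers():  # illustrative helper, safe to remove
    import io
    raw = io.BytesIO(
        b"Host: example.com\r\n"
        b"Accept: text/html\r\n"
        b"Accept: text/plain\r\n"
        b"\r\n"
    )
    headers = read_headers(raw)
    assert headers[b"Host"] == b"example.com"
    assert headers[b"Accept"] == b"text/html, text/plain"
    return headers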
class MaxSizeExceeded(Exception):
pass
class SizeCheckWrapper(object):
"""Wraps a file-like object, raising MaxSizeExceeded if too large."""
def __init__(self, rfile, maxlen):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
def _check_length(self):
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded()
def read(self, size=None):
data = self.rfile.read(size)
self.bytes_read += len(data)
self._check_length()
return data
def readline(self, size=None):
if size is not None:
data = self.rfile.readline(size)
self.bytes_read += len(data)
self._check_length()
return data
# User didn't specify a size ...
# We read the line in chunks to make sure it's not a 100MB line !
res = []
while True:
data = self.rfile.readline(256)
self.bytes_read += len(data)
self._check_length()
res.append(data)
# See https://bitbucket.org/cherrypy/cherrypy/issue/421
            # Compare bytes with bytes; LF is a byte string in this module.
            if len(data) < 256 or data[-1:] == LF:
return EMPTY.join(res)
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline()
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.bytes_read += len(data)
self._check_length()
return data
def next(self):
data = self.rfile.next()
self.bytes_read += len(data)
self._check_length()
return data
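# Hedged usage sketch, not part of the original module: SizeCheckWrapper raises
# MaxSizeExceeded as soon as more than `maxlen` bytes have been read.
def _example_size_check_wrapper():  # illustrative helper, safe to remove
    import io
    wrapped = SizeCheckWrapper(io.BytesIO(b"x" * 100), maxlen=10)
    try:
        wrapped.read()
    except MaxSizeExceeded:
        return "request too large"
    return "within limit"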
class KnownLengthRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted."""
def __init__(self, rfile, content_length):
self.rfile = rfile
self.remaining = content_length
def read(self, size=None):
if self.remaining == 0:
return b''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.read(size)
self.remaining -= len(data)
return data
def readline(self, size=None):
if self.remaining == 0:
return b''
if size is None:
size = self.remaining
else:
size = min(size, self.remaining)
data = self.rfile.readline(size)
self.remaining -= len(data)
return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def close(self):
self.rfile.close()
def __iter__(self):
return self
def __next__(self):
data = next(self.rfile)
self.remaining -= len(data)
return data
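# Hedged usage sketch, not part of the original module: KnownLengthRFile never
# reads past the declared Content-Length and returns b'' once exhausted.
def _example_known_length_rfile():  # illustrative helper, safe to remove
    import io
    body = KnownLengthRFile(io.BytesIO(b"hello, world"), content_length=5)
    first = body.read()    # b"hello" - capped at the declared length
    second = body.read()   # b"" - the wrapper reports EOF afterwards
    return first, second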
class ChunkedRFile(object):
"""Wraps a file-like object, returning an empty string when exhausted.
This class is intended to provide a conforming wsgi.input value for
request entities that have been encoded with the 'chunked' transfer
encoding.
"""
def __init__(self, rfile, maxlen, bufsize=8192):
self.rfile = rfile
self.maxlen = maxlen
self.bytes_read = 0
self.buffer = EMPTY
self.bufsize = bufsize
self.closed = False
def _fetch(self):
if self.closed:
return
line = self.rfile.readline()
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
line = line.strip().split(SEMICOLON, 1)
try:
chunk_size = line.pop(0)
chunk_size = int(chunk_size, 16)
except ValueError:
raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
if chunk_size <= 0:
self.closed = True
return
## if line: chunk_extension = line[0]
if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
raise IOError("Request Entity Too Large")
chunk = self.rfile.read(chunk_size)
self.bytes_read += len(chunk)
self.buffer += chunk
crlf = self.rfile.read(2)
if crlf != CRLF:
raise ValueError(
"Bad chunked transfer coding (expected '\\r\\n', "
"got " + repr(crlf) + ")")
def read(self, size=None):
data = EMPTY
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
if size:
remaining = size - len(data)
data += self.buffer[:remaining]
self.buffer = self.buffer[remaining:]
            else:
                data += self.buffer
                # Drain the buffer so the next pass fetches a new chunk
                # (or detects EOF) instead of looping forever.
                self.buffer = EMPTY
def readline(self, size=None):
data = EMPTY
while True:
if size and len(data) >= size:
return data
if not self.buffer:
self._fetch()
if not self.buffer:
# EOF
return data
newline_pos = self.buffer.find(LF)
            if size:
                if newline_pos == -1:
                    remaining = size - len(data)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                else:
                    # Take up to and including the newline, but never more
                    # than the caller asked for.
                    remaining = min(size - len(data), newline_pos + 1)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                    if remaining == newline_pos + 1:
                        return data
            else:
                if newline_pos == -1:
                    # No newline yet: consume the buffer and fetch more.
                    data += self.buffer
                    self.buffer = EMPTY
                else:
                    data += self.buffer[:newline_pos + 1]
                    self.buffer = self.buffer[newline_pos + 1:]
                    return data
def readlines(self, sizehint=0):
# Shamelessly stolen from StringIO
total = 0
lines = []
line = self.readline(sizehint)
while line:
lines.append(line)
total += len(line)
if 0 < sizehint <= total:
break
line = self.readline(sizehint)
return lines
def read_trailer_lines(self):
if not self.closed:
raise ValueError(
"Cannot read trailers until the request body has been read.")
while True:
line = self.rfile.readline()
if not line:
# No more data--illegal end of headers
raise ValueError("Illegal end of headers.")
self.bytes_read += len(line)
if self.maxlen and self.bytes_read > self.maxlen:
raise IOError("Request Entity Too Large")
if line == CRLF:
# Normal end of headers
break
if not line.endswith(CRLF):
raise ValueError("HTTP requires CRLF terminators")
yield line
def close(self):
self.rfile.close()
    def __iter__(self):
        # Yield whole lines until the chunked stream is exhausted.
        line = self.readline()
        while line:
            yield line
            line = self.readline()
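# Hedged usage sketch, not part of the original module: decodes a minimal
# 'chunked' body (one 5-byte chunk followed by the terminating 0-size chunk).
# maxlen=0 means "no limit" for this wrapper.
def _example_chunked_rfile():  # illustrative helper, safe to remove
    import io
    body = io.BytesIO(b"5\r\nhello\r\n0\r\n\r\n")
    rfile = ChunkedRFile(body, maxlen=0)
    return rfile.read(5)  # b"hello"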
class HTTPRequest(object):
"""An HTTP Request (and response).
A single HTTP connection may consist of multiple request/response pairs.
"""
server = None
"""The HTTPServer object which is receiving this request."""
conn = None
"""The HTTPConnection object on which this request connected."""
inheaders = {}
"""A dict of request headers."""
outheaders = []
"""A list of header tuples to write in the response."""
ready = False
"""When True, the request has been parsed and is ready to begin generating
the response. When False, signals the calling Connection that the response
should not be generated and the connection should close."""
close_connection = False
"""Signals the calling Connection that the request should close. This does
not imply an error! The client and/or server may each request that the
connection be closed."""
chunked_write = False
"""If True, output will be encoded with the "chunked" transfer-coding.
This value is set automatically inside send_headers."""
def __init__(self, server, conn):
        self.server = server
self.conn = conn
self.ready = False
self.started_request = False
self.scheme = ntob("http")
if self.server.ssl_adapter is not None:
self.scheme = ntob("https")
# Use the lowest-common protocol in case read_request_line errors.
self.response_protocol = 'HTTP/1.0'
self.inheaders = {}
self.status = ""
self.outheaders = []
self.sent_headers = False
self.close_connection = self.__class__.close_connection
self.chunked_read = False
self.chunked_write = self.__class__.chunked_write
def parse_request(self):
"""Parse the next HTTP request start-line and message-headers."""
self.rfile = SizeCheckWrapper(self.conn.rfile,
self.server.max_request_header_size)
try:
success = self.read_request_line()
except MaxSizeExceeded:
self.simple_response("414 Request-URI Too Long",
"The Request-URI sent with the request exceeds the maximum "
"allowed bytes.")
return
else:
if not success:
return
try:
success = self.read_request_headers()
except MaxSizeExceeded:
self.simple_response("413 Request Entity Too Large",
"The headers sent with the request exceed the maximum "
"allowed bytes.")
return
else:
if not success:
return
self.ready = True
def read_request_line(self):
# HTTP/1.1 connections are persistent by default. If a client
# requests a page, then idles (leaves the connection open),
# then rfile.readline() will raise socket.error("timed out").
# Note that it does this based on the value given to settimeout(),
# and doesn't need the client to request or acknowledge the close
# (although your TCP stack might suffer for it: cf Apache's history
# with FIN_WAIT_2).
request_line = self.rfile.readline()
# Set started_request to True so communicate() knows to send 408
# from here on out.
self.started_request = True
if not request_line:
return False
if request_line == CRLF:
# RFC 2616 sec 4.1: "...if the server is reading the protocol
# stream at the beginning of a message and receives a CRLF
# first, it should ignore the CRLF."
# But only ignore one leading line! else we enable a DoS.
request_line = self.rfile.readline()
if not request_line:
return False
if not request_line.endswith(CRLF):
self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
return False
try:
method, uri, req_protocol = request_line.strip().split(SPACE, 2)
# The [x:y] slicing is necessary for byte strings to avoid getting ord's
rp = int(req_protocol[5:6]), int(req_protocol[7:8])
except ValueError:
self.simple_response("400 Bad Request", "Malformed Request-Line")
return False
self.uri = uri
self.method = method
# uri may be an abs_path (including "http://host.domain.tld");
scheme, authority, path = self.parse_request_uri(uri)
if NUMBER_SIGN in path:
self.simple_response("400 Bad Request",
"Illegal #fragment in Request-URI.")
return False
if scheme:
self.scheme = scheme
qs = EMPTY
if QUESTION_MARK in path:
path, qs = path.split(QUESTION_MARK, 1)
# Unquote the path+params (e.g. "/this%20path" -> "/this path").
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
#
# But note that "...a URI must be separated into its components
# before the escaped characters within those components can be
# safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
# Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
try:
atoms = [self.unquote_bytes(x) for x in quoted_slash.split(path)]
except ValueError:
ex = sys.exc_info()[1]
self.simple_response("400 Bad Request", ex.args[0])
return False
path = b"%2F".join(atoms)
self.path = path
# Note that, like wsgiref and most other HTTP servers,
# we "% HEX HEX"-unquote the path but not the query string.
self.qs = qs
# Compare request and server HTTP protocol versions, in case our
# server does not support the requested protocol. Limit our output
# to min(req, server). We want the following output:
# request server actual written supported response
# protocol protocol response protocol feature set
# a 1.0 1.0 1.0 1.0
# b 1.0 1.1 1.1 1.0
# c 1.1 1.0 1.0 1.0
# d 1.1 1.1 1.1 1.1
# Notice that, in (b), the response will be "HTTP/1.1" even though
# the client only understands 1.0. RFC 2616 10.5.6 says we should
# only return 505 if the _major_ version is different.
# The [x:y] slicing is necessary for byte strings to avoid getting ord's
sp = int(self.server.protocol[5:6]), int(self.server.protocol[7:8])
if sp[0] != rp[0]:
self.simple_response("505 HTTP Version Not Supported")
return False
self.request_protocol = req_protocol
self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
return True
def read_request_headers(self):
"""Read self.rfile into self.inheaders. Return success."""
# then all the http headers
try:
read_headers(self.rfile, self.inheaders)
except ValueError:
ex = sys.exc_info()[1]
self.simple_response("400 Bad Request", ex.args[0])
return False
mrbs = self.server.max_request_body_size
if mrbs and int(self.inheaders.get(b"Content-Length", 0)) > mrbs:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return False
# Persistent connection support
if self.response_protocol == "HTTP/1.1":
# Both server and client are HTTP/1.1
if self.inheaders.get(b"Connection", b"") == b"close":
self.close_connection = True
else:
# Either the server or client (or both) are HTTP/1.0
if self.inheaders.get(b"Connection", b"") != b"Keep-Alive":
self.close_connection = True
# Transfer-Encoding support
te = None
if self.response_protocol == "HTTP/1.1":
te = self.inheaders.get(b"Transfer-Encoding")
if te:
te = [x.strip().lower() for x in te.split(b",") if x.strip()]
self.chunked_read = False
if te:
for enc in te:
if enc == b"chunked":
self.chunked_read = True
else:
# Note that, even if we see "chunked", we must reject
# if there is an extension we don't recognize.
self.simple_response("501 Unimplemented")
self.close_connection = True
return False
# From PEP 333:
# "Servers and gateways that implement HTTP 1.1 must provide
# transparent support for HTTP 1.1's "expect/continue" mechanism.
# This may be done in any of several ways:
# 1. Respond to requests containing an Expect: 100-continue request
# with an immediate "100 Continue" response, and proceed normally.
# 2. Proceed with the request normally, but provide the application
# with a wsgi.input stream that will send the "100 Continue"
# response if/when the application first attempts to read from
# the input stream. The read request must then remain blocked
# until the client responds.
# 3. Wait until the client decides that the server does not support
# expect/continue, and sends the request body on its own.
# (This is suboptimal, and is not recommended.)
#
# We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
# but it seems like it would be a big slowdown for such a rare case.
if self.inheaders.get(b"Expect", b"") == b"100-continue":
# Don't use simple_response here, because it emits headers
# we don't want. See https://bitbucket.org/cherrypy/cherrypy/issue/951
msg = self.server.protocol.encode('ascii') + b" 100 Continue\r\n\r\n"
try:
self.conn.wfile.write(msg)
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
return True
def parse_request_uri(self, uri):
"""Parse a Request-URI into (scheme, authority, path).
Note that Request-URI's must be one of::
Request-URI = "*" | absoluteURI | abs_path | authority
Therefore, a Request-URI which starts with a double forward-slash
cannot be a "net_path"::
net_path = "//" authority [ abs_path ]
Instead, it must be interpreted as an "abs_path" with an empty first
path segment::
abs_path = "/" path_segments
path_segments = segment *( "/" segment )
segment = *pchar *( ";" param )
param = *pchar
"""
if uri == ASTERISK:
return None, None, uri
scheme, sep, remainder = uri.partition(b'://')
if sep and QUESTION_MARK not in scheme:
# An absoluteURI.
# If there's a scheme (and it must be http or https), then:
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
authority, path_a, path_b = remainder.partition(FORWARD_SLASH)
return scheme.lower(), authority, path_a+path_b
if uri.startswith(FORWARD_SLASH):
# An abs_path.
return None, None, uri
else:
# An authority.
return None, uri, None
def unquote_bytes(self, path):
"""takes quoted string and unquotes % encoded values"""
res = path.split(b'%')
for i in range(1, len(res)):
item = res[i]
try:
res[i] = bytes([int(item[:2], 16)]) + item[2:]
except ValueError:
raise
return b''.join(res)
def respond(self):
"""Call the gateway and write its iterable output."""
mrbs = self.server.max_request_body_size
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get(b"Content-Length", 0))
if mrbs and mrbs < cl:
if not self.sent_headers:
self.simple_response("413 Request Entity Too Large",
"The entity sent with the request exceeds the maximum "
"allowed bytes.")
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
self.server.gateway(self).respond()
if (self.ready and not self.sent_headers):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
self.conn.wfile.write(b"0\r\n\r\n")
def simple_response(self, status, msg=""):
"""Write a simple response back to the client."""
status = str(status)
buf = [bytes(self.server.protocol, "ascii") + SPACE +
bytes(status, "ISO-8859-1") + CRLF,
bytes("Content-Length: %s\r\n" % len(msg), "ISO-8859-1"),
b"Content-Type: text/plain\r\n"]
if status[:3] in ("413", "414"):
# Request Entity Too Large / Request-URI Too Long
self.close_connection = True
if self.response_protocol == 'HTTP/1.1':
# This will not be true for 414, since read_request_line
# usually raises 414 before reading the whole line, and we
# therefore cannot know the proper response_protocol.
buf.append(b"Connection: close\r\n")
else:
# HTTP/1.0 had no 413/414 status nor Connection header.
# Emit 400 instead and trust the message body is enough.
status = "400 Bad Request"
buf.append(CRLF)
if msg:
if isinstance(msg, unicodestr):
msg = msg.encode("ISO-8859-1")
buf.append(msg)
try:
self.conn.wfile.write(b"".join(buf))
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
def write(self, chunk):
"""Write unbuffered data to the client."""
if self.chunked_write and chunk:
buf = [bytes(hex(len(chunk)), 'ASCII')[2:], CRLF, chunk, CRLF]
self.conn.wfile.write(EMPTY.join(buf))
else:
self.conn.wfile.write(chunk)
def send_headers(self):
"""Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.
"""
hkeys = [key.lower() for key, value in self.outheaders]
status = int(self.status[:3])
if status == 413:
# Request Entity Too Large. Close conn to avoid garbage.
self.close_connection = True
elif b"content-length" not in hkeys:
# "All 1xx (informational), 204 (no content),
# and 304 (not modified) responses MUST NOT
# include a message-body." So no point chunking.
if status < 200 or status in (204, 205, 304):
pass
else:
if (self.response_protocol == 'HTTP/1.1'
and self.method != b'HEAD'):
# Use the chunked transfer-coding
self.chunked_write = True
self.outheaders.append((b"Transfer-Encoding", b"chunked"))
else:
# Closing the conn is the only way to determine len.
self.close_connection = True
if b"connection" not in hkeys:
if self.response_protocol == 'HTTP/1.1':
# Both server and client are HTTP/1.1 or better
if self.close_connection:
self.outheaders.append((b"Connection", b"close"))
else:
# Server and/or client are HTTP/1.0
if not self.close_connection:
self.outheaders.append((b"Connection", b"Keep-Alive"))
if (not self.close_connection) and (not self.chunked_read):
# Read any remaining request body data on the socket.
# "If an origin server receives a request that does not include an
# Expect request-header field with the "100-continue" expectation,
# the request includes a request body, and the server responds
# with a final status code before reading the entire request body
# from the transport connection, then the server SHOULD NOT close
# the transport connection until it has read the entire request,
# or until the client closes the connection. Otherwise, the client
# might not reliably receive the response message. However, this
# requirement is not be construed as preventing a server from
# defending itself against denial-of-service attacks, or from
# badly broken client implementations."
remaining = getattr(self.rfile, 'remaining', 0)
if remaining > 0:
self.rfile.read(remaining)
if b"date" not in hkeys:
self.outheaders.append(
(b"Date", email.utils.formatdate(usegmt=True).encode('ISO-8859-1')))
if b"server" not in hkeys:
self.outheaders.append(
(b"Server", self.server.server_name.encode('ISO-8859-1')))
buf = [self.server.protocol.encode('ascii') + SPACE + self.status + CRLF]
for k, v in self.outheaders:
buf.append(k + COLON + SPACE + v + CRLF)
buf.append(CRLF)
self.conn.wfile.write(EMPTY.join(buf))
class NoSSLError(Exception):
"""Exception raised when a client speaks HTTP to an HTTPS socket."""
pass
class FatalSSLAlert(Exception):
"""Exception raised when the SSL implementation signals a fatal alert."""
pass
class CP_BufferedWriter(io.BufferedWriter):
"""Faux file object attached to a socket object."""
def write(self, b):
self._checkClosed()
if isinstance(b, str):
raise TypeError("can't write str to binary stream")
with self._write_lock:
self._write_buf.extend(b)
self._flush_unlocked()
return len(b)
def _flush_unlocked(self):
self._checkClosed("flush of closed file")
while self._write_buf:
try:
                # ssl sockets only accept 'bytes', not bytearrays,
# so perhaps we should conditionally wrap this for perf?
n = self.raw.write(bytes(self._write_buf))
except io.BlockingIOError as e:
n = e.characters_written
del self._write_buf[:n]
def CP_makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
if 'r' in mode:
return io.BufferedReader(socket.SocketIO(sock, mode), bufsize)
else:
return CP_BufferedWriter(socket.SocketIO(sock, mode), bufsize)
class HTTPConnection(object):
"""An HTTP connection (active socket).
server: the Server object which received this connection.
socket: the raw socket object (usually TCP) for this connection.
makefile: a fileobject class for reading from the socket.
"""
remote_addr = None
remote_port = None
ssl_env = None
rbufsize = DEFAULT_BUFFER_SIZE
wbufsize = DEFAULT_BUFFER_SIZE
RequestHandlerClass = HTTPRequest
def __init__(self, server, sock, makefile=CP_makefile):
self.server = server
self.socket = sock
self.rfile = makefile(sock, "rb", self.rbufsize)
self.wfile = makefile(sock, "wb", self.wbufsize)
self.requests_seen = 0
def communicate(self):
"""Read each request and respond appropriately."""
request_seen = False
try:
while True:
# (re)set req to None so that if something goes wrong in
# the RequestHandlerClass constructor, the error doesn't
# get written to the previous request.
req = None
req = self.RequestHandlerClass(self.server, self)
# This order of operations should guarantee correct pipelining.
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if not req.ready:
# Something went wrong in the parsing (and the server has
# probably already made a simple_response). Return and
# let the conn close.
return
request_seen = True
req.respond()
if req.close_connection:
return
except socket.error:
e = sys.exc_info()[1]
errnum = e.args[0]
# sadly SSL sockets return a different (longer) time out string
if errnum == 'timed out' or errnum == 'The read operation timed out':
# Don't error if we're between requests; only error
# if 1) no request has been started at all, or 2) we're
# in the middle of a request.
# See https://bitbucket.org/cherrypy/cherrypy/issue/853
if (not request_seen) or (req and req.started_request):
# Don't bother writing the 408 if the response
# has already started being written.
if req and not req.sent_headers:
try:
req.simple_response("408 Request Timeout")
except FatalSSLAlert:
# Close the connection.
return
elif errnum not in socket_errors_to_ignore:
self.server.error_log("socket.error %s" % repr(errnum),
level=logging.WARNING, traceback=True)
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error")
except FatalSSLAlert:
# Close the connection.
return
return
except (KeyboardInterrupt, SystemExit):
raise
except FatalSSLAlert:
# Close the connection.
return
except NoSSLError:
if req and not req.sent_headers:
# Unwrap our wfile
self.wfile = CP_makefile(self.socket._sock, "wb", self.wbufsize)
req.simple_response("400 Bad Request",
"The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
self.linger = True
except Exception:
e = sys.exc_info()[1]
self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
if req and not req.sent_headers:
try:
req.simple_response("500 Internal Server Error")
except FatalSSLAlert:
# Close the connection.
return
linger = False
def close(self):
"""Close the socket underlying this connection."""
self.rfile.close()
if not self.linger:
# Python's socket module does NOT call close on the kernel socket
# when you call socket.close(). We do so manually here because we
# want this server to send a FIN TCP segment immediately. Note this
# must be called *before* calling socket.close(), because the latter
# drops its reference to the kernel socket.
# Python 3 *probably* fixed this with socket._real_close; hard to tell.
## self.socket._sock.close()
self.socket.close()
else:
# On the other hand, sometimes we want to hang around for a bit
# to make sure the client has a chance to read our entire
# response. Skipping the close() calls here delays the FIN
# packet until the socket object is garbage-collected later.
# Someday, perhaps, we'll do the full lingering_close that
# Apache does, but not today.
pass
class TrueyZero(object):
"""An object which equals and does math like the integer '0' but evals True."""
def __add__(self, other):
return other
def __radd__(self, other):
return other
trueyzero = TrueyZero()
_SHUTDOWNREQUEST = None
class WorkerThread(threading.Thread):
"""Thread which continuously polls a Queue for Connection objects.
Due to the timing issues of polling a Queue, a WorkerThread does not
check its own 'ready' flag after it has started. To stop the thread,
it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
(one for each running WorkerThread).
"""
conn = None
"""The current connection pulled off the Queue, or None."""
server = None
"""The HTTP Server which spawned this thread, and which owns the
Queue and is placing active connections into it."""
ready = False
"""A simple flag for the calling server to know when this thread
has begun polling the Queue."""
def __init__(self, server):
self.ready = False
self.server = server
self.requests_seen = 0
self.bytes_read = 0
self.bytes_written = 0
self.start_time = None
self.work_time = 0
self.stats = {
'Requests': lambda s: self.requests_seen + ((self.start_time is None) and trueyzero or self.conn.requests_seen),
'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and trueyzero or self.conn.rfile.bytes_read),
'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and trueyzero or self.conn.wfile.bytes_written),
'Work Time': lambda s: self.work_time + ((self.start_time is None) and trueyzero or time.time() - self.start_time),
'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
}
threading.Thread.__init__(self)
def run(self):
self.server.stats['Worker Threads'][self.getName()] = self.stats
try:
self.ready = True
while True:
conn = self.server.requests.get()
if conn is _SHUTDOWNREQUEST:
return
self.conn = conn
if self.server.stats['Enabled']:
self.start_time = time.time()
try:
conn.communicate()
finally:
conn.close()
if self.server.stats['Enabled']:
self.requests_seen += self.conn.requests_seen
self.bytes_read += self.conn.rfile.bytes_read
self.bytes_written += self.conn.wfile.bytes_written
self.work_time += time.time() - self.start_time
self.start_time = None
self.conn = None
except (KeyboardInterrupt, SystemExit):
exc = sys.exc_info()[1]
self.server.interrupt = exc
class ThreadPool(object):
"""A Request Queue for an HTTPServer which pools threads.
ThreadPool objects must provide min, get(), put(obj), start()
and stop(timeout) attributes.
"""
def __init__(self, server, min=10, max=-1):
self.server = server
self.min = min
self.max = max
self._threads = []
self._queue = queue.Queue()
self.get = self._queue.get
def start(self):
"""Start the pool of threads."""
for i in range(self.min):
self._threads.append(WorkerThread(self.server))
for worker in self._threads:
worker.setName("CP Server " + worker.getName())
worker.start()
for worker in self._threads:
while not worker.ready:
time.sleep(.1)
def _get_idle(self):
"""Number of worker threads which are idle. Read-only."""
return len([t for t in self._threads if t.conn is None])
idle = property(_get_idle, doc=_get_idle.__doc__)
def put(self, obj):
self._queue.put(obj)
if obj is _SHUTDOWNREQUEST:
return
def grow(self, amount):
"""Spawn new worker threads (not above self.max)."""
if self.max > 0:
budget = max(self.max - len(self._threads), 0)
else:
# self.max <= 0 indicates no maximum
budget = float('inf')
n_new = min(amount, budget)
workers = [self._spawn_worker() for i in range(n_new)]
while not all(worker.ready for worker in workers):
time.sleep(.1)
self._threads.extend(workers)
def _spawn_worker(self):
worker = WorkerThread(self.server)
worker.setName("CP Server " + worker.getName())
worker.start()
return worker
def shrink(self, amount):
"""Kill off worker threads (not below self.min)."""
# Grow/shrink the pool if necessary.
# Remove any dead threads from our list
for t in self._threads:
if not t.isAlive():
self._threads.remove(t)
amount -= 1
# calculate the number of threads above the minimum
n_extra = max(len(self._threads) - self.min, 0)
# don't remove more than amount
n_to_remove = min(amount, n_extra)
# put shutdown requests on the queue equal to the number of threads
# to remove. As each request is processed by a worker, that worker
# will terminate and be culled from the list.
for n in range(n_to_remove):
self._queue.put(_SHUTDOWNREQUEST)
def stop(self, timeout=5):
# OmniMarkupPreviewer: Force shutdown without waiting too much
while self._get_qsize() > 0:
conn = self.get()
if conn is not _SHUTDOWNREQUEST:
conn.close()
# Must shut down threads here so the code that calls
# this method can know when all threads are stopped.
for worker in self._threads:
self._queue.put(_SHUTDOWNREQUEST)
# Don't join currentThread (when stop is called inside a request).
current = threading.currentThread()
if timeout and timeout >= 0:
endtime = time.time() + timeout
while self._threads:
worker = self._threads.pop()
if worker is not current and worker.isAlive():
try:
if timeout is None or timeout < 0:
worker.join()
else:
remaining_time = endtime - time.time()
if remaining_time > 0:
worker.join(remaining_time)
if worker.isAlive():
# We exhausted the timeout.
# Forcibly shut down the socket.
c = worker.conn
if c and not c.rfile.closed:
try:
c.socket.shutdown(socket.SHUT_RD)
except TypeError:
# pyOpenSSL sockets don't take an arg
c.socket.shutdown()
worker.join()
except (AssertionError,
# Ignore repeated Ctrl-C.
# See https://bitbucket.org/cherrypy/cherrypy/issue/691.
KeyboardInterrupt):
pass
def _get_qsize(self):
return self._queue.qsize()
qsize = property(_get_qsize)
try:
import fcntl
except ImportError:
try:
from ctypes import windll, WinError
import ctypes.wintypes
_SetHandleInformation = windll.kernel32.SetHandleInformation
_SetHandleInformation.argtypes = [
ctypes.wintypes.HANDLE,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD,
]
_SetHandleInformation.restype = ctypes.wintypes.BOOL
except ImportError:
def prevent_socket_inheritance(sock):
"""Dummy function, since neither fcntl nor ctypes are available."""
pass
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (Windows)."""
if not _SetHandleInformation(sock.fileno(), 1, 0):
raise WinError()
else:
def prevent_socket_inheritance(sock):
"""Mark the given socket fd as non-inheritable (POSIX)."""
fd = sock.fileno()
old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
class SSLAdapter(object):
"""Base class for SSL driver library adapters.
Required methods:
* ``wrap(sock) -> (wrapped socket, ssl environ dict)``
* ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
"""
def __init__(self, certificate, private_key, certificate_chain=None):
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
def wrap(self, sock):
        raise NotImplementedError
def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
        raise NotImplementedError
class HTTPServer(object):
"""An HTTP server."""
_bind_addr = "127.0.0.1"
_interrupt = None
gateway = None
"""A Gateway instance."""
minthreads = None
"""The minimum number of worker threads to create (default 10)."""
maxthreads = None
"""The maximum number of worker threads to create (default -1 = no limit)."""
server_name = None
"""The name of the server; defaults to socket.gethostname()."""
protocol = "HTTP/1.1"
"""The version string to write in the Status-Line of all HTTP responses.
For example, "HTTP/1.1" is the default. This also limits the supported
features used in the response."""
request_queue_size = 5
"""The 'backlog' arg to socket.listen(); max queued connections (default 5)."""
shutdown_timeout = 5
"""The total time, in seconds, to wait for worker threads to cleanly exit."""
timeout = 10
"""The timeout in seconds for accepted connections (default 10)."""
version = "CherryPy/3.2.4"
"""A version string for the HTTPServer."""
software = None
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
If None, this defaults to ``'%s Server' % self.version``."""
ready = False
"""An internal flag which marks whether the socket is accepting connections."""
max_request_header_size = 0
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
max_request_body_size = 0
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
nodelay = True
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
ConnectionClass = HTTPConnection
"""The class to use for handling HTTP connections."""
ssl_adapter = None
"""An instance of SSLAdapter (or a subclass).
You must have the corresponding SSL driver library installed."""
def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
server_name=None):
self.bind_addr = bind_addr
self.gateway = gateway
self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.clear_stats()
def clear_stats(self):
self._start_time = None
self._run_time = 0
self.stats = {
'Enabled': False,
'Bind Address': lambda s: repr(self.bind_addr),
'Run time': lambda s: (not s['Enabled']) and -1 or self.runtime(),
'Accepts': 0,
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
'Queue': lambda s: getattr(self.requests, "qsize", None),
'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
'Threads Idle': lambda s: getattr(self.requests, "idle", None),
'Socket Errors': 0,
'Requests': lambda s: (not s['Enabled']) and -1 or sum([w['Requests'](w) for w
in s['Worker Threads'].values()], 0),
'Bytes Read': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Read'](w) for w
in s['Worker Threads'].values()], 0),
'Bytes Written': lambda s: (not s['Enabled']) and -1 or sum([w['Bytes Written'](w) for w
in s['Worker Threads'].values()], 0),
'Work Time': lambda s: (not s['Enabled']) and -1 or sum([w['Work Time'](w) for w
in s['Worker Threads'].values()], 0),
'Read Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Write Throughput': lambda s: (not s['Enabled']) and -1 or sum(
[w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
for w in s['Worker Threads'].values()], 0),
'Worker Threads': {},
}
logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
def runtime(self):
if self._start_time is None:
return self._run_time
else:
return self._run_time + (time.time() - self._start_time)
def __str__(self):
return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
self.bind_addr)
def _get_bind_addr(self):
return self._bind_addr
def _set_bind_addr(self, value):
if isinstance(value, tuple) and value[0] in ('', None):
# Despite the socket module docs, using '' does not
# allow AI_PASSIVE to work. Passing None instead
# returns '0.0.0.0' like we want. In other words:
# host AI_PASSIVE result
# '' Y 192.168.x.y
# '' N 192.168.x.y
# None Y 0.0.0.0
# None N 127.0.0.1
# But since you can get the same effect with an explicit
# '0.0.0.0', we deny both the empty string and None as values.
raise ValueError("Host values of '' or None are not allowed. "
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
"to listen on all active interfaces.")
self._bind_addr = value
bind_addr = property(_get_bind_addr, _set_bind_addr,
doc="""The interface on which to listen for connections.
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
or IPv6 address, or any valid hostname. The string 'localhost' is a
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
The string '0.0.0.0' is a special IPv4 entry meaning "any active
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
IPv6. The empty string or None are not allowed.
For UNIX sockets, supply the filename as a string.""")
def start(self):
"""Run the server forever."""
# We don't have to trap KeyboardInterrupt or SystemExit here,
        # because cherrypy.server already does so, calling self.stop() for us.
# If you're using this server with another framework, you should
# trap those exceptions in whatever code block calls start().
self._interrupt = None
if self.software is None:
self.software = "%s Server" % self.version
# Select the appropriate socket
if isinstance(self.bind_addr, basestring):
# AF_UNIX socket
# So we can reuse the socket...
try: os.unlink(self.bind_addr)
except: pass
# So everyone can access the socket...
try: os.chmod(self.bind_addr, 511) # 0777
except: pass
info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
else:
# AF_INET or AF_INET6 socket
# Get the correct address family for our host (allows IPv6 addresses)
host, port = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
if ':' in self.bind_addr[0]:
info = [(socket.AF_INET6, socket.SOCK_STREAM,
0, "", self.bind_addr + (0, 0))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM,
0, "", self.bind_addr)]
self.socket = None
errors = ["No socket could be created:"]
for res in info:
af, socktype, proto, canonname, sa = res
try:
self.bind(af, socktype, proto)
except socket.error as serr:
errors.append("(%s: %s)" % (sa, serr))
if self.socket:
self.socket.close()
self.socket = None
continue
break
if not self.socket:
raise socket.error("\n".join(errors))
# Timeout so KeyboardInterrupt can be caught on Win32
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
# Create worker threads
self.requests.start()
self.ready = True
self._start_time = time.time()
while self.ready:
try:
self.tick()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.error_log("Error in HTTPServer.tick", level=logging.ERROR,
traceback=True)
if self.interrupt:
while self.interrupt is True:
# Wait for self.stop() to complete. See _set_interrupt.
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
def error_log(self, msg="", level=20, traceback=False):
# Override this in subclasses as desired
sys.stderr.write(msg + '\n')
sys.stderr.flush()
if traceback:
tblines = format_exc()
sys.stderr.write(tblines)
sys.stderr.flush()
def bind(self, family, type, proto=0):
"""Create (or recreate) the actual socket object."""
self.socket = socket.socket(family, type, proto)
prevent_socket_inheritance(self.socket)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.nodelay and not isinstance(self.bind_addr, str):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.ssl_adapter is not None:
self.socket = self.ssl_adapter.bind(self.socket)
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
# activate dual-stack. See https://bitbucket.org/cherrypy/cherrypy/issue/871.
if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
# Apparently, the socket option is not available in
# this machine's TCP stack
pass
self.socket.bind(self.bind_addr)
def tick(self):
"""Accept a new connection and put it on the Queue."""
try:
s, addr = self.socket.accept()
if self.stats['Enabled']:
self.stats['Accepts'] += 1
if not self.ready:
return
prevent_socket_inheritance(s)
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
makefile = CP_makefile
ssl_env = {}
# if ssl cert and key are set, we try to be a secure HTTP server
if self.ssl_adapter is not None:
try:
s, ssl_env = self.ssl_adapter.wrap(s)
except NoSSLError:
msg = ("The client sent a plain HTTP request, but "
"this server only speaks HTTPS on this port.")
buf = ["%s 400 Bad Request\r\n" % self.protocol,
"Content-Length: %s\r\n" % len(msg),
"Content-Type: text/plain\r\n\r\n",
msg]
wfile = makefile(s, "wb", DEFAULT_BUFFER_SIZE)
try:
wfile.write("".join(buf).encode('ISO-8859-1'))
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
raise
return
if not s:
return
makefile = self.ssl_adapter.makefile
# Re-apply our timeout since we may have a new socket object
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
conn = self.ConnectionClass(self, s, makefile)
if not isinstance(self.bind_addr, basestring):
# optional values
# Until we do DNS lookups, omit REMOTE_HOST
if addr is None: # sometimes this can happen
# figure out if AF_INET or AF_INET6.
if len(s.getsockname()) == 2:
# AF_INET
addr = ('0.0.0.0', 0)
else:
# AF_INET6
addr = ('::', 0)
conn.remote_addr = addr[0]
conn.remote_port = addr[1]
conn.ssl_env = ssl_env
self.requests.put(conn)
except socket.timeout:
# The only reason for the timeout in start() is so we can
# notice keyboard interrupts on Win32, which don't interrupt
# accept() by default
return
except socket.error:
x = sys.exc_info()[1]
if self.stats['Enabled']:
self.stats['Socket Errors'] += 1
if x.args[0] in socket_error_eintr:
# I *think* this is right. EINTR should occur when a signal
# is received during the accept() call; all docs say retry
# the call, and I *think* I'm reading it right that Python
# will then go ahead and poll for and handle the signal
# elsewhere. See https://bitbucket.org/cherrypy/cherrypy/issue/707.
return
if x.args[0] in socket_errors_nonblocking:
# Just try again. See https://bitbucket.org/cherrypy/cherrypy/issue/479.
return
if x.args[0] in socket_errors_to_ignore:
# Our socket was closed.
# See https://bitbucket.org/cherrypy/cherrypy/issue/686.
return
raise
def _get_interrupt(self):
return self._interrupt
def _set_interrupt(self, interrupt):
self._interrupt = True
self.stop()
self._interrupt = interrupt
interrupt = property(_get_interrupt, _set_interrupt,
doc="Set this to an Exception instance to "
"interrupt the server.")
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
self.ready = False
if self._start_time is not None:
self._run_time += (time.time() - self._start_time)
self._start_time = None
sock = getattr(self, "socket", None)
if sock:
if not isinstance(self.bind_addr, basestring):
# Touch our own socket to make accept() return immediately.
try:
host, port = sock.getsockname()[:2]
except socket.error:
x = sys.exc_info()[1]
if x.args[0] not in socket_errors_to_ignore:
# Changed to use error code and not message
# See https://bitbucket.org/cherrypy/cherrypy/issue/860.
raise
else:
# Note that we're explicitly NOT using AI_PASSIVE,
# here, because we want an actual IP to touch.
# localhost won't work if we've bound to a public IP,
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, "close"):
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout)
class Gateway(object):
"""A base class to interface HTTPServer with other systems, such as WSGI."""
def __init__(self, req):
self.req = req
def respond(self):
"""Process the current request. Must be overridden in a subclass."""
        raise NotImplementedError
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
}
def get_ssl_adapter_class(name='builtin'):
"""Return an SSL adapter class for the given name."""
adapter = ssl_adapters[name.lower()]
if isinstance(adapter, basestring):
last_dot = adapter.rfind(".")
attr_name = adapter[last_dot + 1:]
mod_path = adapter[:last_dot]
try:
mod = sys.modules[mod_path]
if mod is None:
raise KeyError()
except KeyError:
# The last [''] is important.
mod = __import__(mod_path, globals(), locals(), [''])
# Let an AttributeError propagate outward.
try:
adapter = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
return adapter
# -------------------------------- WSGI Stuff -------------------------------- #
class CherryPyWSGIServer(HTTPServer):
"""A subclass of HTTPServer which calls a WSGI application."""
wsgi_version = (1, 0)
"""The version of WSGI to produce."""
def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
self.requests = ThreadPool(self, min=numthreads or 1, max=max)
self.wsgi_app = wsgi_app
self.gateway = wsgi_gateways[self.wsgi_version]
self.bind_addr = bind_addr
if not server_name:
server_name = socket.gethostname()
self.server_name = server_name
self.request_queue_size = request_queue_size
self.timeout = timeout
self.shutdown_timeout = shutdown_timeout
self.clear_stats()
def _get_numthreads(self):
return self.requests.min
def _set_numthreads(self, value):
self.requests.min = value
numthreads = property(_get_numthreads, _set_numthreads)
class WSGIGateway(Gateway):
"""A base class to interface HTTPServer with WSGI."""
def __init__(self, req):
self.req = req
self.started_response = False
self.env = self.get_environ()
self.remaining_bytes_out = None
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
        raise NotImplementedError
def respond(self):
"""Process the current request."""
response = self.req.server.wsgi_app(self.env, self.start_response)
try:
for chunk in response:
# "The start_response callable must not actually transmit
# the response headers. Instead, it must store them for the
# server or gateway to transmit only after the first
# iteration of the application return value that yields
# a NON-EMPTY string, or upon the application's first
# invocation of the write() callable." (PEP 333)
if chunk:
if isinstance(chunk, unicodestr):
chunk = chunk.encode('ISO-8859-1')
self.write(chunk)
finally:
if hasattr(response, "close"):
response.close()
def start_response(self, status, headers, exc_info = None):
"""WSGI callable to begin the HTTP response."""
# "The application may call start_response more than once,
# if and only if the exc_info argument is provided."
if self.started_response and not exc_info:
raise AssertionError("WSGI start_response called a second "
"time with no exc_info.")
self.started_response = True
# "if exc_info is provided, and the HTTP headers have already been
# sent, start_response must raise an error, and should raise the
# exc_info tuple."
if self.req.sent_headers:
try:
raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
finally:
exc_info = None
# According to PEP 3333, when using Python 3, the response status
# and headers must be bytes masquerading as unicode; that is, they
# must be of type "str" but are restricted to code points in the
# "latin-1" set.
if not isinstance(status, str):
raise TypeError("WSGI response status is not of type str.")
self.req.status = status.encode('ISO-8859-1')
for k, v in headers:
if not isinstance(k, str):
raise TypeError("WSGI response header key %r is not of type str." % k)
if not isinstance(v, str):
raise TypeError("WSGI response header value %r is not of type str." % v)
if k.lower() == 'content-length':
self.remaining_bytes_out = int(v)
self.req.outheaders.append((k.encode('ISO-8859-1'), v.encode('ISO-8859-1')))
return self.write
def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError("WSGI write called before start_response.")
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response("500 Internal Server Error",
"The requested resource returned more bytes than the "
"declared Content-Length.")
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
if not self.req.sent_headers:
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
"Response body exceeds the declared Content-Length.")
class WSGIGateway_10(WSGIGateway):
"""A Gateway class to interface HTTPServer with WSGI 1.0.x."""
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env = {
# set a non-standard environ entry so the WSGI app can know what
# the *real* server protocol is (and what features to support).
# See http://www.faqs.org/rfcs/rfc2145.html.
'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
'PATH_INFO': req.path.decode('ISO-8859-1'),
'QUERY_STRING': req.qs.decode('ISO-8859-1'),
'REMOTE_ADDR': req.conn.remote_addr or '',
'REMOTE_PORT': str(req.conn.remote_port or ''),
'REQUEST_METHOD': req.method.decode('ISO-8859-1'),
'REQUEST_URI': req.uri,
'SCRIPT_NAME': '',
'SERVER_NAME': req.server.server_name,
# Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
'SERVER_PROTOCOL': req.request_protocol.decode('ISO-8859-1'),
'SERVER_SOFTWARE': req.server.software,
'wsgi.errors': sys.stderr,
'wsgi.input': req.rfile,
'wsgi.multiprocess': False,
'wsgi.multithread': True,
'wsgi.run_once': False,
'wsgi.url_scheme': req.scheme.decode('ISO-8859-1'),
'wsgi.version': (1, 0),
}
if isinstance(req.server.bind_addr, basestring):
# AF_UNIX. This isn't really allowed by WSGI, which doesn't
# address unix domain sockets. But it's better than nothing.
env["SERVER_PORT"] = ""
else:
env["SERVER_PORT"] = str(req.server.bind_addr[1])
# Request headers
for k, v in req.inheaders.items():
k = k.decode('ISO-8859-1').upper().replace("-", "_")
env["HTTP_" + k] = v.decode('ISO-8859-1')
# CONTENT_TYPE/CONTENT_LENGTH
ct = env.pop("HTTP_CONTENT_TYPE", None)
if ct is not None:
env["CONTENT_TYPE"] = ct
cl = env.pop("HTTP_CONTENT_LENGTH", None)
if cl is not None:
env["CONTENT_LENGTH"] = cl
if req.conn.ssl_env:
env.update(req.conn.ssl_env)
return env
class WSGIGateway_u0(WSGIGateway_10):
"""A Gateway class to interface HTTPServer with WSGI u.0.
WSGI u.0 is an experimental protocol, which uses unicode for keys and values
in both Python 2 and Python 3.
"""
def get_environ(self):
"""Return a new environ dict targeting the given wsgi.version"""
req = self.req
env_10 = WSGIGateway_10.get_environ(self)
env = env_10.copy()
env['wsgi.version'] = ('u', 0)
# Request-URI
env.setdefault('wsgi.url_encoding', 'utf-8')
try:
# SCRIPT_NAME is the empty string, who cares what encoding it is?
env["PATH_INFO"] = req.path.decode(env['wsgi.url_encoding'])
env["QUERY_STRING"] = req.qs.decode(env['wsgi.url_encoding'])
except UnicodeDecodeError:
# Fall back to latin 1 so apps can transcode if needed.
env['wsgi.url_encoding'] = 'ISO-8859-1'
env["PATH_INFO"] = env_10["PATH_INFO"]
env["QUERY_STRING"] = env_10["QUERY_STRING"]
return env
wsgi_gateways = {
(1, 0): WSGIGateway_10,
('u', 0): WSGIGateway_u0,
}
class WSGIPathInfoDispatcher(object):
"""A WSGI dispatcher for dispatch based on the PATH_INFO.
apps: a dict or list of (path_prefix, app) pairs.
"""
def __init__(self, apps):
try:
apps = list(apps.items())
except AttributeError:
pass
# Sort the apps by len(path), descending
apps.sort()
apps.reverse()
# The path_prefix strings must start, but not end, with a slash.
# Use "" instead of "/".
self.apps = [(p.rstrip("/"), a) for p, a in apps]
def __call__(self, environ, start_response):
path = environ["PATH_INFO"] or "/"
for p, app in self.apps:
# The apps list should be sorted by length, descending.
if path.startswith(p + "/") or path == p:
environ = environ.copy()
environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
environ["PATH_INFO"] = path[len(p):]
return app(environ, start_response)
start_response('404 Not Found', [('Content-Type', 'text/plain'),
('Content-Length', '0')])
return ['']
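# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Shows how WSGIPathInfoDispatcher routes a request by longest path prefix and
# re-splits SCRIPT_NAME / PATH_INFO, as described in the class docstring above.
# The two app callables and the hand-built environ are hypothetical placeholders.
def _example_path_dispatch():
    def root_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'root']
    def blog_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'blog ' + environ['PATH_INFO'].encode('ISO-8859-1')]
    dispatcher = WSGIPathInfoDispatcher({'/': root_app, '/blog': blog_app})
    captured = {}
    def start_response(status, headers, exc_info=None):
        captured['status'] = status
    # '/blog/2014' matches the '/blog' prefix, so blog_app is invoked with
    # SCRIPT_NAME='/blog' and PATH_INFO='/2014'.
    environ = {'PATH_INFO': '/blog/2014', 'SCRIPT_NAME': ''}
    body = dispatcher(environ, start_response)
    return captured['status'], body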
|
exiahuang/SalesforceXyTools
|
libs/cherrypy/wsgiserver/wsgiserver3.py
|
Python
|
apache-2.0
| 77,620
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, SparkContext
from pyspark.ml.common import _java2py, _py2java
from pyspark.ml.linalg import DenseMatrix, Vectors
from pyspark.ml.wrapper import JavaWrapper, _jvm
from pyspark.sql.column import Column, _to_seq
from pyspark.sql.functions import lit
class ChiSquareTest(object):
"""
Conduct Pearson's independence test for every feature against the label. For each feature,
the (feature, label) pairs are converted into a contingency matrix for which the Chi-squared
statistic is computed. All label and feature values must be categorical.
The null hypothesis is that the occurrence of the outcomes is statistically independent.
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def test(dataset, featuresCol, labelCol):
"""
Perform a Pearson's independence test using dataset.
:param dataset:
DataFrame of categorical labels and categorical features.
Real-valued features will be treated as categorical for each distinct value.
:param featuresCol:
Name of features column in dataset, of type `Vector` (`VectorUDT`).
:param labelCol:
Name of label column in dataset, of any numerical type.
:return:
DataFrame containing the test result for every feature against the label.
This DataFrame will contain a single Row with the following fields:
- `pValues: Vector`
- `degreesOfFreedom: Array[Int]`
- `statistics: Vector`
Each of these fields has one value per feature.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import ChiSquareTest
>>> dataset = [[0, Vectors.dense([0, 0, 1])],
... [0, Vectors.dense([1, 0, 1])],
... [1, Vectors.dense([2, 1, 1])],
... [1, Vectors.dense([3, 1, 1])]]
>>> dataset = spark.createDataFrame(dataset, ["label", "features"])
>>> chiSqResult = ChiSquareTest.test(dataset, 'features', 'label')
>>> chiSqResult.select("degreesOfFreedom").collect()[0]
Row(degreesOfFreedom=[3, 1, 0])
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.ChiSquareTest
args = [_py2java(sc, arg) for arg in (dataset, featuresCol, labelCol)]
return _java2py(sc, javaTestObj.test(*args))
class Correlation(object):
"""
Compute the correlation matrix for the input dataset of Vectors using the specified method.
Methods currently supported: `pearson` (default), `spearman`.
.. note:: For Spearman, a rank correlation, we need to create an RDD[Double] for each column
and sort it in order to retrieve the ranks and then join the columns back into an RDD[Vector],
which is fairly costly. Cache the input Dataset before calling corr with `method = 'spearman'`
to avoid recomputing the common lineage.
.. versionadded:: 2.2.0
"""
@staticmethod
@since("2.2.0")
def corr(dataset, column, method="pearson"):
"""
Compute the correlation matrix with specified method using dataset.
:param dataset:
A Dataset or a DataFrame.
:param column:
The name of the column of vectors for which the correlation coefficient needs
to be computed. This must be a column of the dataset, and it must contain
Vector objects.
:param method:
String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`.
:return:
A DataFrame that contains the correlation matrix of the column of vectors. This
DataFrame contains a single row and a single column of name
'$METHODNAME($COLUMN)'.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.stat import Correlation
>>> dataset = [[Vectors.dense([1, 0, 0, -2])],
... [Vectors.dense([4, 5, 0, 3])],
... [Vectors.dense([6, 7, 0, 8])],
... [Vectors.dense([9, 0, 0, 1])]]
>>> dataset = spark.createDataFrame(dataset, ['features'])
>>> pearsonCorr = Correlation.corr(dataset, 'features', 'pearson').collect()[0][0]
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.0556..., NaN, 0.4004...],
[ 0.0556..., 1. , NaN, 0.9135...],
[ NaN, NaN, 1. , NaN],
[ 0.4004..., 0.9135..., NaN, 1. ]])
>>> spearmanCorr = Correlation.corr(dataset, 'features', method='spearman').collect()[0][0]
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
DenseMatrix([[ 1. , 0.1054..., NaN, 0.4 ],
[ 0.1054..., 1. , NaN, 0.9486... ],
[ NaN, NaN, 1. , NaN],
[ 0.4 , 0.9486... , NaN, 1. ]])
"""
sc = SparkContext._active_spark_context
javaCorrObj = _jvm().org.apache.spark.ml.stat.Correlation
args = [_py2java(sc, arg) for arg in (dataset, column, method)]
return _java2py(sc, javaCorrObj.corr(*args))
class KolmogorovSmirnovTest(object):
"""
Conduct the two-sided Kolmogorov Smirnov (KS) test for data sampled from a continuous
distribution.
By comparing the largest difference between the empirical cumulative
    distribution of the sample data and the theoretical distribution, we can provide a test for
    the null hypothesis that the sample data comes from that theoretical distribution.
.. versionadded:: 2.4.0
"""
@staticmethod
@since("2.4.0")
def test(dataset, sampleCol, distName, *params):
"""
Conduct a one-sample, two-sided Kolmogorov-Smirnov test for probability distribution
equality. Currently supports the normal distribution, taking as parameters the mean and
standard deviation.
:param dataset:
a Dataset or a DataFrame containing the sample of data to test.
:param sampleCol:
Name of sample column in dataset, of any numerical type.
:param distName:
          a `string` name for a theoretical distribution; currently only "norm" is supported.
:param params:
a list of `Double` values specifying the parameters to be used for the theoretical
          distribution. For the "norm" distribution, the parameters are the mean and standard deviation.
:return:
A DataFrame that contains the Kolmogorov-Smirnov test result for the input sampled data.
This DataFrame will contain a single Row with the following fields:
- `pValue: Double`
- `statistic: Double`
>>> from pyspark.ml.stat import KolmogorovSmirnovTest
>>> dataset = [[-1.0], [0.0], [1.0]]
>>> dataset = spark.createDataFrame(dataset, ['sample'])
>>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 0.0, 1.0).first()
>>> round(ksResult.pValue, 3)
1.0
>>> round(ksResult.statistic, 3)
0.175
>>> dataset = [[2.0], [3.0], [4.0]]
>>> dataset = spark.createDataFrame(dataset, ['sample'])
>>> ksResult = KolmogorovSmirnovTest.test(dataset, 'sample', 'norm', 3.0, 1.0).first()
>>> round(ksResult.pValue, 3)
1.0
>>> round(ksResult.statistic, 3)
0.175
"""
sc = SparkContext._active_spark_context
javaTestObj = _jvm().org.apache.spark.ml.stat.KolmogorovSmirnovTest
dataset = _py2java(sc, dataset)
params = [float(param) for param in params]
return _java2py(sc, javaTestObj.test(dataset, sampleCol, distName,
_jvm().PythonUtils.toSeq(params)))
class Summarizer(object):
"""
Tools for vectorized statistics on MLlib Vectors.
The methods in this package provide various statistics for Vectors contained inside DataFrames.
This class lets users pick the statistics they would like to extract for a given column.
>>> from pyspark.ml.stat import Summarizer
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> summarizer = Summarizer.metrics("mean", "count")
>>> df = sc.parallelize([Row(weight=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
... Row(weight=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
>>> df.select(summarizer.summary(df.features, df.weight)).show(truncate=False)
+-----------------------------------+
|aggregate_metrics(features, weight)|
+-----------------------------------+
|[[1.0,1.0,1.0], 1] |
+-----------------------------------+
<BLANKLINE>
>>> df.select(summarizer.summary(df.features)).show(truncate=False)
+--------------------------------+
|aggregate_metrics(features, 1.0)|
+--------------------------------+
|[[1.0,1.5,2.0], 2] |
+--------------------------------+
<BLANKLINE>
>>> df.select(Summarizer.mean(df.features, df.weight)).show(truncate=False)
+--------------+
|mean(features)|
+--------------+
|[1.0,1.0,1.0] |
+--------------+
<BLANKLINE>
>>> df.select(Summarizer.mean(df.features)).show(truncate=False)
+--------------+
|mean(features)|
+--------------+
|[1.0,1.5,2.0] |
+--------------+
<BLANKLINE>
.. versionadded:: 2.4.0
"""
@staticmethod
@since("2.4.0")
def mean(col, weightCol=None):
"""
return a column of mean summary
"""
return Summarizer._get_single_metric(col, weightCol, "mean")
@staticmethod
@since("3.0.0")
def sum(col, weightCol=None):
"""
return a column of sum summary
"""
return Summarizer._get_single_metric(col, weightCol, "sum")
@staticmethod
@since("2.4.0")
def variance(col, weightCol=None):
"""
return a column of variance summary
"""
return Summarizer._get_single_metric(col, weightCol, "variance")
@staticmethod
@since("3.0.0")
def std(col, weightCol=None):
"""
return a column of std summary
"""
return Summarizer._get_single_metric(col, weightCol, "std")
@staticmethod
@since("2.4.0")
def count(col, weightCol=None):
"""
return a column of count summary
"""
return Summarizer._get_single_metric(col, weightCol, "count")
@staticmethod
@since("2.4.0")
def numNonZeros(col, weightCol=None):
"""
return a column of numNonZero summary
"""
return Summarizer._get_single_metric(col, weightCol, "numNonZeros")
@staticmethod
@since("2.4.0")
def max(col, weightCol=None):
"""
return a column of max summary
"""
return Summarizer._get_single_metric(col, weightCol, "max")
@staticmethod
@since("2.4.0")
def min(col, weightCol=None):
"""
return a column of min summary
"""
return Summarizer._get_single_metric(col, weightCol, "min")
@staticmethod
@since("2.4.0")
def normL1(col, weightCol=None):
"""
return a column of normL1 summary
"""
return Summarizer._get_single_metric(col, weightCol, "normL1")
@staticmethod
@since("2.4.0")
def normL2(col, weightCol=None):
"""
return a column of normL2 summary
"""
return Summarizer._get_single_metric(col, weightCol, "normL2")
@staticmethod
def _check_param(featuresCol, weightCol):
if weightCol is None:
weightCol = lit(1.0)
if not isinstance(featuresCol, Column) or not isinstance(weightCol, Column):
raise TypeError("featureCol and weightCol should be a Column")
return featuresCol, weightCol
@staticmethod
def _get_single_metric(col, weightCol, metric):
col, weightCol = Summarizer._check_param(col, weightCol)
return Column(JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer." + metric,
col._jc, weightCol._jc))
@staticmethod
@since("2.4.0")
def metrics(*metrics):
"""
        Given a list of metrics, provides a builder that in turn computes the metrics from a column.
See the documentation of [[Summarizer]] for an example.
The following metrics are accepted (case sensitive):
- mean: a vector that contains the coefficient-wise mean.
- sum: a vector that contains the coefficient-wise sum.
         - variance: a vector that contains the coefficient-wise variance.
         - std: a vector that contains the coefficient-wise standard deviation.
         - count: the count of all vectors seen.
         - numNonzeros: a vector with the number of non-zeros for each coefficient.
- max: the maximum for each coefficient.
- min: the minimum for each coefficient.
- normL2: the Euclidean norm for each coefficient.
- normL1: the L1 norm of each coefficient (sum of the absolute values).
:param metrics:
metrics that can be provided.
:return:
an object of :py:class:`pyspark.ml.stat.SummaryBuilder`
        Note: Currently, the performance of this interface is about 2x~3x slower than using the RDD
interface.
"""
sc = SparkContext._active_spark_context
js = JavaWrapper._new_java_obj("org.apache.spark.ml.stat.Summarizer.metrics",
_to_seq(sc, metrics))
return SummaryBuilder(js)
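# --- Hedged usage sketch (illustration only; not part of the original module) ---
# Builds a multi-metric summary with Summarizer.metrics and reads the result back
# as struct fields, complementing the doctests above. It assumes an active
# SparkSession passed in as `spark`; the two-row DataFrame is a made-up example.
def _example_summarizer_usage(spark):
    from pyspark.ml.linalg import Vectors
    from pyspark.sql import Row
    df = spark.createDataFrame([
        Row(weight=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
        Row(weight=0.0, features=Vectors.dense(1.0, 2.0, 3.0)),
    ])
    summarizer = Summarizer.metrics("mean", "variance", "count")
    summary = df.select(summarizer.summary(df.features, df.weight).alias("s"))
    # Each requested metric becomes a field of the struct column "s".
    return summary.select("s.mean", "s.variance", "s.count").first()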
class SummaryBuilder(JavaWrapper):
"""
A builder object that provides summary statistics about a given column.
Users should not directly create such builders, but instead use one of the methods in
:py:class:`pyspark.ml.stat.Summarizer`
.. versionadded:: 2.4.0
"""
def __init__(self, jSummaryBuilder):
super(SummaryBuilder, self).__init__(jSummaryBuilder)
@since("2.4.0")
def summary(self, featuresCol, weightCol=None):
"""
Returns an aggregate object that contains the summary of the column with the requested
metrics.
:param featuresCol:
a column that contains features Vector object.
:param weightCol:
a column that contains weight value. Default weight is 1.0.
:return:
an aggregate column that contains the statistics. The exact content of this
structure is determined during the creation of the builder.
"""
featuresCol, weightCol = Summarizer._check_param(featuresCol, weightCol)
return Column(self._java_obj.summary(featuresCol._jc, weightCol._jc))
class MultivariateGaussian(object):
"""Represents a (mean, cov) tuple
>>> m = MultivariateGaussian(Vectors.dense([11,12]), DenseMatrix(2, 2, (1.0, 3.0, 5.0, 2.0)))
>>> (m.mean, m.cov.toArray())
(DenseVector([11.0, 12.0]), array([[ 1., 5.],
[ 3., 2.]]))
.. versionadded:: 3.0.0
"""
def __init__(self, mean, cov):
self.mean = mean
self.cov = cov
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.stat
from pyspark.sql import SparkSession
try:
        # NumPy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.stat.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder \
.master("local[2]") \
.appName("ml.stat tests") \
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
failure_count, test_count = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
|
goldmedal/spark
|
python/pyspark/ml/stat.py
|
Python
|
apache-2.0
| 16,949
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
from resource_management import *
from webhcat import webhcat
from webhcat_service import webhcat_service
class WebHCatServer(Script):
def install(self, env):
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
webhcat()
def start(self, env):
import params
env.set_params(params)
self.configure(env) # FOR SECURITY
webhcat_service(action = 'start')
def stop(self, env):
import params
env.set_params(params)
webhcat_service(action = 'stop')
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.webhcat_pid_file)
if __name__ == "__main__":
WebHCatServer().execute()
|
zouzhberk/ambaridemo
|
demo-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/webhcat_server.py
|
Python
|
apache-2.0
| 1,523
|
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains common constants for DFP scripts."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
|
krux/adspygoogle
|
scripts/adspygoogle/dfp/__init__.py
|
Python
|
apache-2.0
| 718
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import mock
import google.auth
import google.datalab
import google.datalab.bigquery
import google.datalab.utils
from google.datalab.bigquery._api import Api
class TestCases(unittest.TestCase):
def validate(self, mock_http_request, expected_url, expected_args=None, expected_data=None,
expected_headers=None, expected_method=None):
url = mock_http_request.call_args[0][0]
kwargs = mock_http_request.call_args[1]
self.assertEquals(expected_url, url)
if expected_args is not None:
self.assertEquals(expected_args, kwargs['args'])
else:
self.assertNotIn('args', kwargs)
if expected_data is not None:
self.assertEquals(expected_data, kwargs['data'])
else:
self.assertNotIn('data', kwargs)
if expected_headers is not None:
self.assertEquals(expected_headers, kwargs['headers'])
else:
self.assertNotIn('headers', kwargs)
if expected_method is not None:
self.assertEquals(expected_method, kwargs['method'])
else:
self.assertNotIn('method', kwargs)
@mock.patch('google.datalab.utils.Http.request')
def test_jobs_insert_load(self, mock_http_request):
api = TestCases._create_api()
api.jobs_insert_load('SOURCE', google.datalab.bigquery._utils.TableName('p', 'd', 't', ''))
self.maxDiff = None
expected_data = {
'kind': 'bigquery#job',
'configuration': {
'load': {
'sourceUris': ['SOURCE'],
'destinationTable': {
'projectId': 'p',
'datasetId': 'd',
'tableId': 't'
},
'createDisposition': 'CREATE_NEVER',
'writeDisposition': 'WRITE_EMPTY',
'sourceFormat': 'CSV',
'fieldDelimiter': ',',
'allowJaggedRows': False,
'allowQuotedNewlines': False,
'encoding': 'UTF-8',
'ignoreUnknownValues': False,
'maxBadRecords': 0,
'quote': '"',
'skipLeadingRows': 0
}
}
}
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/p/jobs/',
expected_data=expected_data)
api.jobs_insert_load('SOURCE2', google.datalab.bigquery._utils.TableName('p2', 'd2', 't2', ''),
append=True, create=True, allow_jagged_rows=True,
allow_quoted_newlines=True, ignore_unknown_values=True,
source_format='JSON', max_bad_records=1)
expected_data = {
'kind': 'bigquery#job',
'configuration': {
'load': {
'sourceUris': ['SOURCE2'],
'destinationTable': {
'projectId': 'p2',
'datasetId': 'd2',
'tableId': 't2'
},
'createDisposition': 'CREATE_IF_NEEDED',
'writeDisposition': 'WRITE_APPEND',
'sourceFormat': 'JSON',
'ignoreUnknownValues': True,
'maxBadRecords': 1
}
}
}
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/p2/jobs/',
expected_data=expected_data)
@mock.patch('google.datalab.utils.Http.request')
def test_jobs_insert_query(self, mock_http_request):
context = TestCases._create_context()
context.config['bigquery_billing_tier'] = None
api = TestCases._create_api(context)
api.jobs_insert_query('SQL')
expected_data = {
'kind': 'bigquery#job',
'configuration': {
'query': {
'query': 'SQL',
'useQueryCache': True,
'allowLargeResults': False,
'useLegacySql': False,
},
'dryRun': False,
'priority': 'BATCH',
},
}
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/test/jobs/',
expected_data=expected_data)
context.config['bigquery_billing_tier'] = 1
api.jobs_insert_query('SQL2',
table_name=google.datalab.bigquery._utils.TableName('p', 'd', 't', ''),
append=True, dry_run=True, use_cache=False, batch=False,
allow_large_results=True)
expected_data = {
'kind': 'bigquery#job',
'configuration': {
'query': {
'query': 'SQL2',
'useQueryCache': False,
'allowLargeResults': True,
'useLegacySql': False,
'maximumBillingTier': 1,
'destinationTable': {
'projectId': 'p',
'datasetId': 'd',
'tableId': 't'
},
'writeDisposition': 'WRITE_APPEND',
},
'dryRun': True,
'priority': 'INTERACTIVE',
},
}
self.maxDiff = None
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/test/jobs/',
expected_data=expected_data)
@mock.patch('google.datalab.utils.Http.request')
def test_jobs_query_results(self, mock_http_request):
api = TestCases._create_api()
api.jobs_query_results('JOB', 'PROJECT', 10, 20, 30)
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/PROJECT/queries/JOB',
expected_args={'maxResults': 10, 'timeoutMs': 20, 'startIndex': 30})
@mock.patch('google.datalab.utils.Http.request')
def test_jobs_get(self, mock_http_request):
api = TestCases._create_api()
api.jobs_get('JOB', 'PROJECT')
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/PROJECT/jobs/JOB')
@mock.patch('google.datalab.utils.Http.request')
def test_datasets_insert(self, mock_http_request):
api = TestCases._create_api()
api.datasets_insert(google.datalab.bigquery._utils.DatasetName('p', 'd'))
expected_data = {
'kind': 'bigquery#dataset',
'datasetReference': {
'projectId': 'p',
'datasetId': 'd',
}
}
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/p/datasets/',
expected_data=expected_data)
api.datasets_insert(google.datalab.bigquery._utils.DatasetName('p', 'd'), 'FRIENDLY',
'DESCRIPTION')
expected_data = {
'kind': 'bigquery#dataset',
'datasetReference': {
'projectId': 'p',
'datasetId': 'd'
},
'friendlyName': 'FRIENDLY',
'description': 'DESCRIPTION'
}
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/p/datasets/',
expected_data=expected_data)
@mock.patch('google.datalab.utils.Http.request')
def test_datasets_delete(self, mock_http_request):
api = TestCases._create_api()
api.datasets_delete(google.datalab.bigquery._utils.DatasetName('p', 'd'), False)
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d',
expected_args={},
expected_method='DELETE')
api.datasets_delete(google.datalab.bigquery._utils.DatasetName('p', 'd'), True)
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d',
expected_args={'deleteContents': True},
expected_method='DELETE')
@mock.patch('google.datalab.utils.Http.request')
def test_datasets_update(self, mock_http_request):
api = TestCases._create_api()
api.datasets_update(google.datalab.bigquery._utils.DatasetName('p', 'd'), 'INFO')
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d',
expected_method='PUT', expected_data='INFO')
@mock.patch('google.datalab.utils.Http.request')
def test_datasets_get(self, mock_http_request):
api = TestCases._create_api()
api.datasets_get(google.datalab.bigquery._utils.DatasetName('p', 'd'))
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d')
@mock.patch('google.datalab.utils.Http.request')
def test_datasets_list(self, mock_http_request):
api = TestCases._create_api()
api.datasets_list()
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/test/datasets/',
expected_args={})
api.datasets_list('PROJECT', 10, 'TOKEN')
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/PROJECT/datasets/',
expected_args={'maxResults': 10, 'pageToken': 'TOKEN'})
@mock.patch('google.datalab.utils.Http.request')
def test_tables_get(self, mock_http_request):
api = TestCases._create_api()
api.tables_get(google.datalab.bigquery._utils.TableName('p', 'd', 't', ''))
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d/tables/t')
@mock.patch('google.datalab.utils.Http.request')
def test_tables_list(self, mock_http_request):
api = TestCases._create_api()
api.tables_list(google.datalab.bigquery._utils.DatasetName('p', 'd'))
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d/tables/',
expected_args={})
api.tables_list(google.datalab.bigquery._utils.DatasetName('p', 'd'), 10, 'TOKEN')
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d/tables/',
expected_args={'maxResults': 10, 'pageToken': 'TOKEN'})
@mock.patch('google.datalab.utils.Http.request')
def test_tables_insert(self, mock_http_request):
api = TestCases._create_api()
api.tables_insert(google.datalab.bigquery._utils.TableName('p', 'd', 't', ''))
expected_data = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': 'p',
'datasetId': 'd',
'tableId': 't'
}
}
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d/tables/',
expected_data=expected_data)
api.tables_insert(google.datalab.bigquery._utils.TableName('p', 'd', 't', ''),
'SCHEMA', 'QUERY', 'FRIENDLY', 'DESCRIPTION')
expected_data = {
'kind': 'bigquery#table',
'tableReference': {
'projectId': 'p',
'datasetId': 'd',
'tableId': 't'
},
'schema': {
'fields': 'SCHEMA'
},
'view': {'query': 'QUERY'},
'friendlyName': 'FRIENDLY',
'description': 'DESCRIPTION'
}
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d/tables/',
expected_data=expected_data)
@mock.patch('google.datalab.utils.Http.request')
def test_tabledata_insertAll(self, mock_http_request):
api = TestCases._create_api()
api.tabledata_insert_all(google.datalab.bigquery._utils.TableName('p', 'd', 't', ''), 'ROWS')
expected_data = {
'kind': 'bigquery#tableDataInsertAllRequest',
'rows': 'ROWS'
}
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d/tables/t/insertAll',
expected_data=expected_data)
@mock.patch('google.datalab.utils.Http.request')
def test_tabledata_list(self, mock_http_request):
api = TestCases._create_api()
api.tabledata_list(google.datalab.bigquery._utils.TableName('p', 'd', 't', ''))
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d/tables/t/data',
expected_args={})
api.tabledata_list(google.datalab.bigquery._utils.TableName('p', 'd', 't', ''), 10, 20, 'TOKEN')
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d/tables/t/data',
expected_args={
'startIndex': 10,
'maxResults': 20,
'pageToken': 'TOKEN'
})
@mock.patch('google.datalab.utils.Http.request')
def test_table_delete(self, mock_http_request):
api = TestCases._create_api()
api.table_delete(google.datalab.bigquery._utils.TableName('p', 'd', 't', ''))
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d/tables/t',
expected_method='DELETE')
@mock.patch('google.datalab.utils.Http.request')
def test_table_extract(self, mock_http_request):
api = TestCases._create_api()
api.table_extract(google.datalab.bigquery._utils.TableName('p', 'd', 't', ''), 'DEST')
expected_data = {
'kind': 'bigquery#job',
'configuration': {
'extract': {
'sourceTable': {
'projectId': 'p',
'datasetId': 'd',
'tableId': 't'
},
'compression': 'GZIP',
'fieldDelimiter': ',',
'printHeader': True,
'destinationUris': ['DEST'],
'destinationFormat': 'CSV',
}
}
}
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/jobs/',
expected_data=expected_data)
api.table_extract(google.datalab.bigquery._utils.TableName('p', 'd', 't', ''),
['DEST'], format='JSON', compress=False, field_delimiter=':',
print_header=False)
expected_data = {
'kind': 'bigquery#job',
'configuration': {
'extract': {
'sourceTable': {
'projectId': 'p',
'datasetId': 'd',
'tableId': 't'
},
'compression': 'NONE',
'fieldDelimiter': ':',
'printHeader': False,
'destinationUris': ['DEST'],
'destinationFormat': 'JSON',
}
}
}
self.validate(mock_http_request, 'https://www.googleapis.com/bigquery/v2/projects/p/jobs/',
expected_data=expected_data)
@mock.patch('google.datalab.utils.Http.request')
def test_table_update(self, mock_http_request):
api = TestCases._create_api()
api.table_update(google.datalab.bigquery._utils.TableName('p', 'd', 't', ''), 'INFO')
self.validate(mock_http_request,
'https://www.googleapis.com/bigquery/v2/projects/p/datasets/d/tables/t',
expected_method='PUT', expected_data='INFO')
@staticmethod
def _create_api(context=None):
if not context:
context = TestCases._create_context()
return Api(context)
@staticmethod
def _create_context():
project_id = 'test'
creds = mock.Mock(spec=google.auth.credentials.Credentials)
return google.datalab.Context(project_id, creds)
|
craigcitro/pydatalab
|
tests/bigquery/api_tests.py
|
Python
|
apache-2.0
| 15,440
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Search
# Searches for legislators matching a given query.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Search(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Search Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(Search, self).__init__(temboo_session, '/Library/SunlightLabs/Congress/Legislator/Search')
def new_input_set(self):
return SearchInputSet()
def _make_result_set(self, result, path):
return SearchResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SearchChoreographyExecution(session, exec_id, path)
class SearchInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Search
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Sunlight Labs.)
"""
super(SearchInputSet, self)._set_input('APIKey', value)
def set_AllLegislators(self, value):
"""
        Set the value of the AllLegislators input for this Choreo. ((optional, boolean) A boolean flag indicating whether to search all legislators, including those no longer in office.)
"""
super(SearchInputSet, self)._set_input('AllLegislators', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) A comma-separated list of fields to include in the response.)
"""
super(SearchInputSet, self)._set_input('Fields', value)
def set_Filters(self, value):
"""
Set the value of the Filters input for this Choreo. ((optional, string) A JSON object containing key/value pairs to be used as filters.)
"""
super(SearchInputSet, self)._set_input('Filters', value)
def set_Name(self, value):
"""
Set the value of the Name input for this Choreo. ((optional, string) Deprecated (retained for backward compatibility only).)
"""
super(SearchInputSet, self)._set_input('Name', value)
def set_Order(self, value):
"""
Set the value of the Order input for this Choreo. ((optional, string) Used to order the results by field name (e.g. field__asc).)
"""
super(SearchInputSet, self)._set_input('Order', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) The page offset.)
"""
super(SearchInputSet, self)._set_input('Page', value)
def set_PerPage(self, value):
"""
Set the value of the PerPage input for this Choreo. ((optional, integer) The number of results to return per page.)
"""
super(SearchInputSet, self)._set_input('PerPage', value)
def set_Query(self, value):
"""
Set the value of the Query input for this Choreo. ((conditional, string) A search term.)
"""
super(SearchInputSet, self)._set_input('Query', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(SearchInputSet, self)._set_input('ResponseFormat', value)
class SearchResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Search Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from the Sunlight Congress API.)
"""
return self._output.get('Response', None)
class SearchChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SearchResultSet(response, path)
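# --- Hedged usage sketch (illustration only; not part of the generated bindings) ---
# Shows the typical flow for running this Choreo: create a session, build an
# InputSet, set the required APIKey plus a query, execute, and parse the JSON
# Response output. The account/app-key names are placeholders, and the
# TembooSession import path and execute_with_results call are assumed from the
# standard Temboo Python SDK.
def _example_legislator_search(sunlight_api_key):
    from temboo.core.session import TembooSession  # assumed SDK entry point
    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    choreo = Search(session)
    inputs = choreo.new_input_set()
    inputs.set_APIKey(sunlight_api_key)
    inputs.set_Query('smith')
    inputs.set_PerPage(5)
    results = choreo.execute_with_results(inputs)
    return results.getJSONFromString(results.get_Response())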
|
jordanemedlock/psychtruths
|
temboo/core/Library/SunlightLabs/Congress/Legislator/Search.py
|
Python
|
apache-2.0
| 5,201
|
"""Support for Xiaomi Gateways."""
from datetime import timedelta
import logging
import voluptuous as vol
from xiaomi_gateway import XiaomiGateway, XiaomiGatewayDiscovery
from homeassistant import config_entries, core
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_DEVICE_ID,
ATTR_VOLTAGE,
CONF_HOST,
CONF_MAC,
CONF_PORT,
CONF_PROTOCOL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import format_mac
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from .const import (
CONF_INTERFACE,
CONF_KEY,
CONF_SID,
DEFAULT_DISCOVERY_RETRY,
DOMAIN,
GATEWAYS_KEY,
LISTENER_KEY,
)
_LOGGER = logging.getLogger(__name__)
GATEWAY_PLATFORMS = ["binary_sensor", "sensor", "switch", "light", "cover", "lock"]
GATEWAY_PLATFORMS_NO_KEY = ["binary_sensor", "sensor"]
ATTR_GW_MAC = "gw_mac"
ATTR_RINGTONE_ID = "ringtone_id"
ATTR_RINGTONE_VOL = "ringtone_vol"
TIME_TILL_UNAVAILABLE = timedelta(minutes=150)
SERVICE_PLAY_RINGTONE = "play_ringtone"
SERVICE_STOP_RINGTONE = "stop_ringtone"
SERVICE_ADD_DEVICE = "add_device"
SERVICE_REMOVE_DEVICE = "remove_device"
SERVICE_SCHEMA_PLAY_RINGTONE = vol.Schema(
{
vol.Required(ATTR_RINGTONE_ID): vol.All(
vol.Coerce(int), vol.NotIn([9, 14, 15, 16, 17, 18, 19])
),
vol.Optional(ATTR_RINGTONE_VOL): vol.All(
vol.Coerce(int), vol.Clamp(min=0, max=100)
),
}
)
SERVICE_SCHEMA_REMOVE_DEVICE = vol.Schema(
{vol.Required(ATTR_DEVICE_ID): vol.All(cv.string, vol.Length(min=14, max=14))}
)
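# --- Hedged sketch (illustration only; not part of the integration) ---
# Shows what SERVICE_SCHEMA_PLAY_RINGTONE does to raw service-call data:
# ringtone_id is coerced to int and must not be one of the reserved ids, and
# ringtone_vol is clamped to the 0..100 range. The input dict is a made-up example.
def _example_play_ringtone_schema():
    raw = {ATTR_RINGTONE_ID: "2", ATTR_RINGTONE_VOL: 250}
    validated = SERVICE_SCHEMA_PLAY_RINGTONE(raw)
    # validated == {'ringtone_id': 2, 'ringtone_vol': 100}
    return validated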
def setup(hass, config):
"""Set up the Xiaomi component."""
def play_ringtone_service(call):
"""Service to play ringtone through Gateway."""
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {"mid": ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if ring_vol is not None:
kwargs["vol"] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs)
def stop_ringtone_service(call):
"""Service to stop playing ringtone on Gateway."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000)
def add_device_service(call):
"""Service to add a new sub-device within the next 30 seconds."""
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission="yes")
hass.components.persistent_notification.async_create(
"Join permission enabled for 30 seconds! "
"Please press the pairing button of the new device once.",
title="Xiaomi Aqara Gateway",
)
def remove_device_service(call):
"""Service to remove a sub-device from the gateway."""
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id)
gateway_only_schema = _add_gateway_to_schema(hass, vol.Schema({}))
hass.services.register(
DOMAIN,
SERVICE_PLAY_RINGTONE,
play_ringtone_service,
schema=_add_gateway_to_schema(hass, SERVICE_SCHEMA_PLAY_RINGTONE),
)
hass.services.register(
DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service, schema=gateway_only_schema
)
hass.services.register(
DOMAIN, SERVICE_ADD_DEVICE, add_device_service, schema=gateway_only_schema
)
hass.services.register(
DOMAIN,
SERVICE_REMOVE_DEVICE,
remove_device_service,
schema=_add_gateway_to_schema(hass, SERVICE_SCHEMA_REMOVE_DEVICE),
)
return True
async def async_setup_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up the xiaomi aqara components from a config entry."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(GATEWAYS_KEY, {})
# Connect to Xiaomi Aqara Gateway
xiaomi_gateway = await hass.async_add_executor_job(
XiaomiGateway,
entry.data[CONF_HOST],
entry.data[CONF_SID],
entry.data[CONF_KEY],
DEFAULT_DISCOVERY_RETRY,
entry.data[CONF_INTERFACE],
entry.data[CONF_PORT],
entry.data[CONF_PROTOCOL],
)
hass.data[DOMAIN][GATEWAYS_KEY][entry.entry_id] = xiaomi_gateway
gateway_discovery = hass.data[DOMAIN].setdefault(
LISTENER_KEY,
XiaomiGatewayDiscovery(hass.add_job, [], entry.data[CONF_INTERFACE]),
)
if len(hass.data[DOMAIN][GATEWAYS_KEY]) == 1:
        # start listening for local pushes (only once)
        await hass.async_add_executor_job(gateway_discovery.listen)
        # register stop callback to shut down listening for local pushes
def stop_xiaomi(event):
"""Stop Xiaomi Socket."""
_LOGGER.debug("Shutting down Xiaomi Gateway Listener")
gateway_discovery.stop_listen()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)
gateway_discovery.gateways[entry.data[CONF_HOST]] = xiaomi_gateway
_LOGGER.debug(
"Gateway with host '%s' connected, listening for broadcasts",
entry.data[CONF_HOST],
)
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, entry.unique_id)},
manufacturer="Xiaomi Aqara",
name=entry.title,
sw_version=entry.data[CONF_PROTOCOL],
)
if entry.data[CONF_KEY] is not None:
platforms = GATEWAY_PLATFORMS
else:
platforms = GATEWAY_PLATFORMS_NO_KEY
hass.config_entries.async_setup_platforms(entry, platforms)
return True
async def async_unload_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Unload a config entry."""
if entry.data[CONF_KEY] is not None:
platforms = GATEWAY_PLATFORMS
else:
platforms = GATEWAY_PLATFORMS_NO_KEY
unload_ok = await hass.config_entries.async_unload_platforms(entry, platforms)
if unload_ok:
hass.data[DOMAIN][GATEWAYS_KEY].pop(entry.entry_id)
if len(hass.data[DOMAIN][GATEWAYS_KEY]) == 0:
# No gateways left, stop Xiaomi socket
hass.data[DOMAIN].pop(GATEWAYS_KEY)
_LOGGER.debug("Shutting down Xiaomi Gateway Listener")
gateway_discovery = hass.data[DOMAIN].pop(LISTENER_KEY)
await hass.async_add_executor_job(gateway_discovery.stop_listen)
return unload_ok
class XiaomiDevice(Entity):
"""Representation a base Xiaomi device."""
def __init__(self, device, device_type, xiaomi_hub, config_entry):
"""Initialize the Xiaomi device."""
self._state = None
self._is_available = True
self._sid = device["sid"]
self._model = device["model"]
self._protocol = device["proto"]
self._name = f"{device_type}_{self._sid}"
self._device_name = f"{self._model}_{self._sid}"
self._type = device_type
self._write_to_hub = xiaomi_hub.write_to_hub
self._get_from_hub = xiaomi_hub.get_from_hub
self._extra_state_attributes = {}
self._remove_unavailability_tracker = None
self._xiaomi_hub = xiaomi_hub
self.parse_data(device["data"], device["raw_data"])
self.parse_voltage(device["data"])
if hasattr(self, "_data_key") and self._data_key: # pylint: disable=no-member
self._unique_id = (
f"{self._data_key}{self._sid}" # pylint: disable=no-member
)
else:
self._unique_id = f"{self._type}{self._sid}"
self._gateway_id = config_entry.unique_id
if config_entry.data[CONF_MAC] == format_mac(self._sid):
# this entity belongs to the gateway itself
self._is_gateway = True
self._device_id = config_entry.unique_id
else:
# this entity is connected through zigbee
self._is_gateway = False
self._device_id = self._sid
def _add_push_data_job(self, *args):
self.hass.add_job(self.push_data, *args)
async def async_added_to_hass(self):
"""Start unavailability tracking."""
self._xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
self._async_track_unavailable()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def device_id(self):
"""Return the device id of the Xiaomi Aqara device."""
return self._device_id
@property
def device_info(self):
"""Return the device info of the Xiaomi Aqara device."""
if self._is_gateway:
device_info = {
"identifiers": {(DOMAIN, self._device_id)},
"model": self._model,
}
else:
device_info = {
"connections": {(dr.CONNECTION_ZIGBEE, self._device_id)},
"identifiers": {(DOMAIN, self._device_id)},
"manufacturer": "Xiaomi Aqara",
"model": self._model,
"name": self._device_name,
"sw_version": self._protocol,
"via_device": (DOMAIN, self._gateway_id),
}
return device_info
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def should_poll(self):
"""Return the polling state. No polling needed."""
return False
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return self._extra_state_attributes
@callback
def _async_set_unavailable(self, now):
"""Set state to UNAVAILABLE."""
self._remove_unavailability_tracker = None
self._is_available = False
self.async_write_ha_state()
@callback
def _async_track_unavailable(self):
if self._remove_unavailability_tracker:
self._remove_unavailability_tracker()
self._remove_unavailability_tracker = async_track_point_in_utc_time(
self.hass, self._async_set_unavailable, utcnow() + TIME_TILL_UNAVAILABLE
)
if not self._is_available:
self._is_available = True
return True
return False
@callback
def push_data(self, data, raw_data):
"""Push from Hub."""
_LOGGER.debug("PUSH >> %s: %s", self, data)
was_unavailable = self._async_track_unavailable()
is_data = self.parse_data(data, raw_data)
is_voltage = self.parse_voltage(data)
if is_data or is_voltage or was_unavailable:
self.async_write_ha_state()
def parse_voltage(self, data):
"""Parse battery level data sent by gateway."""
if "voltage" in data:
voltage_key = "voltage"
elif "battery_voltage" in data:
voltage_key = "battery_voltage"
else:
return False
max_volt = 3300
min_volt = 2800
voltage = data[voltage_key]
self._extra_state_attributes[ATTR_VOLTAGE] = round(voltage / 1000.0, 2)
voltage = min(voltage, max_volt)
voltage = max(voltage, min_volt)
percent = ((voltage - min_volt) / (max_volt - min_volt)) * 100
self._extra_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
return True
def parse_data(self, data, raw_data):
"""Parse data sent by gateway."""
raise NotImplementedError()
def _add_gateway_to_schema(hass, schema):
"""Extend a voluptuous schema with a gateway validator."""
def gateway(sid):
"""Convert sid to a gateway."""
sid = str(sid).replace(":", "").lower()
for gateway in hass.data[DOMAIN][GATEWAYS_KEY].values():
if gateway.sid == sid:
return gateway
raise vol.Invalid(f"Unknown gateway sid {sid}")
kwargs = {}
xiaomi_data = hass.data.get(DOMAIN)
if xiaomi_data is not None:
gateways = list(xiaomi_data[GATEWAYS_KEY].values())
# If the user has only 1 gateway, make it the default for services.
if len(gateways) == 1:
kwargs["default"] = gateways[0].sid
return schema.extend({vol.Required(ATTR_GW_MAC, **kwargs): gateway})
|
sander76/home-assistant
|
homeassistant/components/xiaomi_aqara/__init__.py
|
Python
|
apache-2.0
| 12,713
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations often used for initializing tensors.
All variable initializers returned by functions in this file should have the
following signature:
def _initializer(shape, dtype=dtypes.float32):
Args:
shape: List of `int` representing the shape of the output `Tensor`. Some
initializers may also be able to accept a `Tensor`.
dtype: (Optional) Type of the output `Tensor`.
Returns:
A `Tensor` of type `dtype` and `shape`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.util.tf_export import tf_export
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
"""
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. If not provided, a tensor of
       `tf.float32` will be returned.
"""
raise NotImplementedError
def get_config(self):
"""Returns the configuration of the initializer as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary.
It will typically be the output of `get_config`.
Returns:
An Initializer instance.
"""
config.pop("dtype", None)
return cls(**config)
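# --- Hedged sketch (illustration only; not part of the original module) ---
# Demonstrates the initializer signature described in the module docstring: an
# initializer is a callable taking (shape, dtype) and returning a Tensor, and its
# configuration round-trips through get_config() / from_config(). RandomUniform
# is defined later in this module; the shape and bounds here are arbitrary.
def _example_initializer_protocol():
    init = RandomUniform(minval=-1.0, maxval=1.0, seed=42)
    values = init([2, 3], dtype=dtypes.float32)  # a 2x3 float32 tensor
    rebuilt = RandomUniform.from_config(init.get_config())
    return values, rebuilt.get_config() == init.get_config()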
@tf_export("zeros_initializer", v1=[])
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0."""
def __call__(self, shape, dtype=dtypes.float32):
dtype = dtypes.as_dtype(dtype)
return array_ops.zeros(shape, dtype)
@tf_export("ones_initializer", v1=[])
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1."""
def __call__(self, shape, dtype=dtypes.float32):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are
supported.
Raises:
      ValueError: If the dtype is not numeric or boolean.
"""
dtype = dtypes.as_dtype(dtype)
if not dtype.is_numpy_compatible or dtype == dtypes.string:
raise ValueError("Expected numeric or boolean dtype, got %s." % dtype)
return array_ops.ones(shape, dtype)
@tf_export("constant_initializer", v1=[])
class Constant(Initializer):
"""Initializer that generates tensors with constant values.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` following the desired `shape` of the
new tensor (see examples below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the desired shape of the
tensor. In the case where the total number of elements in `value` is less
than the number of elements required by the tensor shape, the last element
in `value` will be used to fill the remaining entries. If the total number of
elements in `value` is greater than the number of elements required by the
tensor shape, the initializer will raise a `ValueError`.
Args:
value: A Python scalar, list or tuple of values, or a N-dimensional numpy
array. All elements of the initialized variable will be set to the
corresponding value in the `value` argument.
Raises:
TypeError: If the input `value` is not one of the expected types.
Examples:
The following example can be rewritten using a numpy.ndarray instead
of the `value` list, even reshaped, as shown in the two commented lines
below the `value` list initialization.
```python
>>> import numpy as np
>>> import tensorflow as tf
>>> value = [0, 1, 2, 3, 4, 5, 6, 7]
>>> # value = np.array(value)
>>> # value = value.reshape([2, 4])
>>> init = tf.constant_initializer(value)
>>> print('fitting shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
fitting shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]]
>>> print('larger shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[3, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
larger shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 7. 7. 7. 7.]]
>>> print('smaller shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 3], initializer=init)
ValueError: Too many elements provided. Needed at most 6, but received 8
```
"""
def __init__(self, value=0):
if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))):
raise TypeError(
"Invalid type for initial value: %s (expected Python scalar, list or "
"tuple of values, or numpy.ndarray)." % type(value))
self.value = value
def __call__(self, shape, dtype=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. If not provided, the dtype of the
       tensor created will be the type of the initial value.
Raises:
TypeError: If the initializer cannot create a tensor of the requested
dtype.
"""
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
return constant_op.constant(
self.value, dtype=dtype, shape=shape)
def get_config(self):
return {"value": self.value}
@tf_export("random_uniform_initializer", v1=[])
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range
of random values to generate.
maxval: A python scalar or a scalar tensor. Upper bound of the range
of random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed`
for behavior.
"""
def __init__(self, minval=-0.05, maxval=0.05, seed=None):
self.minval = minval
self.maxval = maxval
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point and integer
types are supported.
Raises:
ValueError: If the dtype is not numeric.
"""
dtype = dtypes.as_dtype(dtype)
if not dtype.is_floating and not dtype.is_integer:
raise ValueError("Expected float or integer dtype, got %s." % dtype)
return self._random_generator.random_uniform(shape, self.minval,
self.maxval, dtype)
def get_config(self):
return {
"minval": self.minval,
"maxval": self.maxval,
"seed": self.seed
}
@tf_export("random_normal_initializer", v1=[])
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed`
for behavior.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
Raises:
ValueError: If the dtype is not floating point
"""
dtype = _assert_float_dtype(dtype)
return self._random_generator.random_normal(shape, self.mean, self.stddev,
dtype)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"seed": self.seed
}
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
These values are similar to values from a `random_normal_initializer`
except that values more than two standard deviations from the mean
are discarded and re-drawn. This is the recommended initializer for
neural network weights and filters.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed`
for behavior.
"""
def __init__(self, mean=0.0, stddev=0.05, seed=None):
self.mean = mean
self.stddev = stddev
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
Raises:
ValueError: If the dtype is not floating point
"""
dtype = _assert_float_dtype(dtype)
return self._random_generator.truncated_normal(shape, self.mean,
self.stddev, dtype)
def get_config(self):
return {
"mean": self.mean,
"stddev": self.stddev,
"seed": self.seed
}
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
With `distribution="truncated_normal" or "untruncated_normal"`,
samples are drawn from a truncated/untruncated normal
distribution with a mean of zero and a standard deviation (after truncation,
if used) `stddev = sqrt(scale / n)`
where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`, samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
Args:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "truncated_normal",
"untruncated_normal" and "uniform".
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed`
for behavior.
Raises:
ValueError: In case of an invalid value for the "scale", mode" or
"distribution" arguments.
"""
def __init__(self,
scale=1.0,
mode="fan_in",
distribution="truncated_normal",
seed=None):
if scale <= 0.:
raise ValueError("`scale` must be positive float.")
if mode not in {"fan_in", "fan_out", "fan_avg"}:
raise ValueError("Invalid `mode` argument:", mode)
distribution = distribution.lower()
if distribution not in {"uniform", "truncated_normal",
"untruncated_normal"}:
raise ValueError("Invalid `distribution` argument:", distribution)
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
Raises:
ValueError: If the dtype is not floating point
"""
    partition_info = None  # Keeps logic so it can be re-added later if necessary
dtype = _assert_float_dtype(dtype)
scale = self.scale
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
fan_in, fan_out = _compute_fans(scale_shape)
if self.mode == "fan_in":
scale /= max(1., fan_in)
elif self.mode == "fan_out":
scale /= max(1., fan_out)
else:
scale /= max(1., (fan_in + fan_out) / 2.)
if self.distribution == "truncated_normal":
# constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
stddev = math.sqrt(scale) / .87962566103423978
return self._random_generator.truncated_normal(shape, 0.0, stddev, dtype)
elif self.distribution == "untruncated_normal":
stddev = math.sqrt(scale)
return self._random_generator.random_normal(shape, 0.0, stddev, dtype)
else:
limit = math.sqrt(3.0 * scale)
return self._random_generator.random_uniform(shape, -limit, limit, dtype)
def get_config(self):
return {
"scale": self.scale,
"mode": self.mode,
"distribution": self.distribution,
"seed": self.seed
}
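# Worked example (illustrative, not part of the original module): a 3x3 conv
# kernel with 64 input and 128 output channels has shape (3, 3, 64, 128), so
# fan_in = 3 * 3 * 64 = 576. With scale=2.0, mode="fan_in" and
# distribution="truncated_normal", the op is called with
# stddev = sqrt(2.0 / 576) / 0.87962566 ~= 0.067; the division compensates for
# the variance lost to truncation at two standard deviations.
#
#   init = VarianceScaling(scale=2.0, mode="fan_in",
#                          distribution="truncated_normal", seed=7)
#   kernel = init(shape=(3, 3, 64, 128))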
class Orthogonal(Initializer):
"""Initializer that generates an orthogonal matrix.
If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the QR decomposition of a matrix of
random numbers drawn from a normal distribution.
If the matrix has fewer rows than columns then the output will have orthogonal
rows. Otherwise, the output will have orthogonal columns.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Args:
gain: multiplicative factor to apply to the orthogonal matrix
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed`
for behavior.
References:
[Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
([pdf](https://arxiv.org/pdf/1312.6120.pdf))
"""
def __init__(self, gain=1.0, seed=None):
self.gain = gain
self.seed = seed
self._random_generator = _RandomGenerator(seed)
def __call__(self, shape, dtype=dtypes.float32):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
Raises:
ValueError: If the dtype is not floating point or the input shape is not
valid.
"""
dtype = _assert_float_dtype(dtype)
# Check the shape
if len(shape) < 2:
raise ValueError("The tensor to initialize must be "
"at least two-dimensional")
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))
# Generate a random matrix
a = self._random_generator.random_normal(flat_shape, dtype=dtype)
# Compute the qr factorization
q, r = gen_linalg_ops.qr(a, full_matrices=False)
# Make Q uniform
d = array_ops.diag_part(r)
q *= math_ops.sign(d)
if num_rows < num_cols:
q = array_ops.matrix_transpose(q)
return self.gain * array_ops.reshape(q, shape)
def get_config(self):
return {"gain": self.gain, "seed": self.seed}
class Identity(Initializer):
"""Initializer that generates the identity matrix.
Only use for 2D matrices.
Args:
gain: Multiplicative factor to apply to the identity matrix.
"""
def __init__(self, gain=1.0):
self.gain = gain
def __call__(self, shape, dtype=dtypes.float32):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. Only floating point types are
supported.
Raises:
ValueError: If the dtype is not floating point
"""
    partition_info = None  # Keeps logic so it can be re-added later if necessary
dtype = _assert_float_dtype(dtype)
full_shape = shape if partition_info is None else partition_info.full_shape
if len(full_shape) != 2:
raise ValueError(
"Identity matrix initializer can only be used for 2D matrices.")
initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype)
if partition_info is not None:
initializer = array_ops.slice(initializer, partition_info.var_offset,
shape)
return self.gain * initializer
def get_config(self):
return {"gain": self.gain}
class GlorotUniform(VarianceScaling):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Args:
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed`
for behavior.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None):
super(GlorotUniform, self).__init__(
scale=1.0,
mode="fan_avg",
distribution="uniform",
seed=seed)
def get_config(self):
return {"seed": self.seed}
class GlorotNormal(VarianceScaling):
"""The Glorot normal initializer, also called Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Args:
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed` for behavior.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None):
super(GlorotNormal, self).__init__(
scale=1.0,
mode="fan_avg",
distribution="truncated_normal",
seed=seed)
def get_config(self):
return {"seed": self.seed}
# Aliases.
# pylint: disable=invalid-name
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
variance_scaling_initializer = VarianceScaling
glorot_uniform_initializer = GlorotUniform
glorot_normal_initializer = GlorotNormal
orthogonal_initializer = Orthogonal
identity_initializer = Identity
# pylint: enable=invalid-name
def lecun_normal(seed=None):
"""LeCun normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(1 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
([pdf]
(https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(
scale=1., mode="fan_in", distribution="truncated_normal", seed=seed)
def lecun_uniform(seed=None):
"""LeCun uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(3 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks) # pylint: disable=line-too-long
([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(
scale=1., mode="fan_in", distribution="uniform", seed=seed)
def he_normal(seed=None):
"""He normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
return VarianceScaling(
scale=2., mode="fan_in", distribution="truncated_normal", seed=seed)
def he_uniform(seed=None):
"""He uniform variance scaling initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html) # pylint: disable=line-too-long
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
return VarianceScaling(
scale=2., mode="fan_in", distribution="uniform", seed=seed)
# Utility functions.
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
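# Worked example (illustrative, not part of the original module): for a conv
# kernel of shape (3, 3, 64, 128) the receptive field size is 3 * 3 = 9, so
#
#   _compute_fans((3, 3, 64, 128))   # -> (576.0, 1152.0)
#   _compute_fans((784, 10))         # -> (784, 10) for a plain dense layer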
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
dtype = dtypes.as_dtype(dtype)
if not dtype.is_floating:
raise ValueError("Expected floating point type, got %s." % dtype)
return dtype
class _RandomGenerator(object):
"""Random generator that selects appropriate random ops."""
def __init__(self, seed=None):
super(_RandomGenerator, self).__init__()
if seed is not None:
# Stateless random ops requires 2-int seed.
self.seed = [seed, 0]
else:
self.seed = None
def random_normal(self, shape, mean=0.0, stddev=1, dtype=dtypes.float32):
"""A deterministic random normal if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_random_normal
else:
op = random_ops.random_normal
return op(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
def random_uniform(self, shape, minval, maxval, dtype):
"""A deterministic random uniform if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_random_uniform
else:
op = random_ops.random_uniform
return op(
shape=shape, minval=minval, maxval=maxval, dtype=dtype, seed=self.seed)
def truncated_normal(self, shape, mean, stddev, dtype):
"""A deterministic truncated normal if seed is passed."""
if self.seed:
op = stateless_random_ops.stateless_truncated_normal
else:
op = random_ops.truncated_normal
return op(
shape=shape, mean=mean, stddev=stddev, dtype=dtype, seed=self.seed)
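# Note (illustrative, not part of the original module): when a seed is given,
# _RandomGenerator routes every call through the stateless random ops with the
# fixed 2-int seed [seed, 0], so repeated calls with identical arguments return
# identical tensors; without a seed the stateful ops are used and each call
# yields fresh values.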
# Compatibility aliases
# pylint: disable=invalid-name
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
glorot_normal = GlorotNormal
glorot_uniform = GlorotUniform
|
jbedorf/tensorflow
|
tensorflow/python/ops/init_ops_v2.py
|
Python
|
apache-2.0
| 26,725
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains Google PubSub operators.
"""
import warnings
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.pubsub_v1.types import Duration, MessageStoragePolicy, PushConfig
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.pubsub import PubSubHook
from airflow.utils.decorators import apply_defaults
class PubSubCreateTopicOperator(BaseOperator):
"""Create a PubSub topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubCreateTopicOperator`
By default, if the topic already exists, this operator will
not cause the DAG to fail. ::
with DAG('successful DAG') as dag:
(
dag
>> PubSubTopicCreateOperator(project='my-project',
topic='my_new_topic')
>> PubSubTopicCreateOperator(project='my-project',
topic='my_new_topic')
)
The operator can be configured to fail if the topic already exists. ::
with DAG('failing DAG') as dag:
(
dag
>> PubSubTopicCreateOperator(project='my-project',
topic='my_new_topic')
>> PubSubTopicCreateOperator(project='my-project',
topic='my_new_topic',
fail_if_exists=True)
)
Both ``project`` and ``topic`` are templated so you can use
variables in them.
:param project_id: Optional, the GCP project ID where the topic will be created.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param topic: the topic to create. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:type topic: str
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request
must have domain-wide delegation enabled.
:type delegate_to: str
:param labels: Client-assigned labels; see
https://cloud.google.com/pubsub/docs/labels
:type labels: Dict[str, str]
:param message_storage_policy: Policy constraining the set
of Google Cloud Platform regions where messages published to
the topic may be stored. If not present, then no constraints
are in effect.
:type message_storage_policy:
Union[Dict, google.cloud.pubsub_v1.types.MessageStoragePolicy]
:param kms_key_name: The resource name of the Cloud KMS CryptoKey
to be used to protect access to messages published on this topic.
The expected format is
``projects/*/locations/*/keyRings/*/cryptoKeys/*``.
:type kms_key_name: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param project: (Deprecated) the GCP project ID where the topic will be created
:type project: str
"""
template_fields = ['project_id', 'topic']
ui_color = '#0273d4'
# pylint: disable=too-many-arguments
@apply_defaults
def __init__(
self,
topic: str,
project_id: Optional[str] = None,
fail_if_exists: bool = False,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
message_storage_policy: Union[Dict, MessageStoragePolicy] = None,
kms_key_name: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
project: Optional[str] = None,
*args,
**kwargs) -> None:
# To preserve backward compatibility
# TODO: remove one day
if project:
warnings.warn(
"The project parameter has been deprecated. You should pass "
"the project_id parameter.", DeprecationWarning, stacklevel=2)
project_id = project
super().__init__(*args, **kwargs)
self.project_id = project_id
self.topic = topic
self.fail_if_exists = fail_if_exists
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.labels = labels
self.message_storage_policy = message_storage_policy
self.kms_key_name = kms_key_name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
def execute(self, context):
hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
self.log.info("Creating topic %s", self.topic)
hook.create_topic(
project_id=self.project_id,
topic=self.topic,
fail_if_exists=self.fail_if_exists,
labels=self.labels,
message_storage_policy=self.message_storage_policy,
kms_key_name=self.kms_key_name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata
)
self.log.info("Created topic %s", self.topic)
class PubSubCreateSubscriptionOperator(BaseOperator):
"""Create a PubSub subscription.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubCreateSubscriptionOperator`
By default, the subscription will be created in ``topic_project``. If
``subscription_project`` is specified and the GCP credentials allow, the
Subscription can be created in a different project from its topic.
By default, if the subscription already exists, this operator will
not cause the DAG to fail. However, the topic must exist in the project. ::
with DAG('successful DAG') as dag:
(
dag
>> PubSubSubscriptionCreateOperator(
topic_project='my-project', topic='my-topic',
subscription='my-subscription')
>> PubSubSubscriptionCreateOperator(
topic_project='my-project', topic='my-topic',
subscription='my-subscription')
)
The operator can be configured to fail if the subscription already exists.
::
with DAG('failing DAG') as dag:
(
dag
>> PubSubSubscriptionCreateOperator(
topic_project='my-project', topic='my-topic',
subscription='my-subscription')
>> PubSubSubscriptionCreateOperator(
topic_project='my-project', topic='my-topic',
subscription='my-subscription', fail_if_exists=True)
)
Finally, subscription is not required. If not passed, the operator will
    generate a universally unique identifier for the subscription's name. ::
with DAG('DAG') as dag:
(
dag >> PubSubSubscriptionCreateOperator(
topic_project='my-project', topic='my-topic')
)
    ``project_id``, ``topic``, ``subscription``, and
    ``subscription_project_id`` are templated so you can use variables in them.
:param project_id: Optional, the GCP project ID where the topic exists.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
    :param topic: the topic the subscription will be attached to. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:type topic: str
:param subscription: the Pub/Sub subscription name. If empty, a random
name will be generated using the uuid module
:type subscription: str
:param subscription_project_id: the GCP project ID where the subscription
will be created. If empty, ``topic_project`` will be used.
:type subscription_project_id: str
:param ack_deadline_secs: Number of seconds that a subscriber has to
acknowledge each message pulled from the subscription
:type ack_deadline_secs: int
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request
must have domain-wide delegation enabled.
:type delegate_to: str
:param push_config: If push delivery is used with this subscription,
this field is used to configure it. An empty ``pushConfig`` signifies
that the subscriber will pull and ack messages using API methods.
:type push_config: Union[Dict, google.cloud.pubsub_v1.types.PushConfig]
:param retain_acked_messages: Indicates whether to retain acknowledged
messages. If true, then messages are not expunged from the subscription's
backlog, even if they are acknowledged, until they fall out of the
``message_retention_duration`` window. This must be true if you would
like to Seek to a timestamp.
:type retain_acked_messages: bool
:param message_retention_duration: How long to retain unacknowledged messages
in the subscription's backlog, from the moment a message is published. If
``retain_acked_messages`` is true, then this also configures the
retention of acknowledged messages, and thus configures how far back in
time a ``Seek`` can be done. Defaults to 7 days. Cannot be more than 7
days or less than 10 minutes.
:type message_retention_duration: Union[Dict, google.cloud.pubsub_v1.types.Duration]
:param labels: Client-assigned labels; see
https://cloud.google.com/pubsub/docs/labels
:type labels: Dict[str, str]
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param topic_project: (Deprecated) the GCP project ID where the topic exists
:type topic_project: str
:param subscription_project: (Deprecated) the GCP project ID where the subscription
will be created. If empty, ``topic_project`` will be used.
:type subscription_project: str
"""
template_fields = ['project_id', 'topic', 'subscription', 'subscription_project_id']
ui_color = '#0273d4'
# pylint: disable=too-many-arguments
@apply_defaults
def __init__(
self,
topic: str,
project_id: Optional[str] = None,
subscription: Optional[str] = None,
subscription_project_id: Optional[str] = None,
ack_deadline_secs: int = 10,
fail_if_exists: bool = False,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
push_config: Optional[Union[Dict, PushConfig]] = None,
retain_acked_messages: Optional[bool] = None,
message_retention_duration: Optional[Union[Dict, Duration]] = None,
labels: Optional[Dict[str, str]] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
topic_project: Optional[str] = None,
subscription_project: Optional[str] = None,
*args,
**kwargs) -> None:
# To preserve backward compatibility
# TODO: remove one day
if topic_project:
warnings.warn(
"The topic_project parameter has been deprecated. You should pass "
"the project_id parameter.", DeprecationWarning, stacklevel=2)
project_id = topic_project
if subscription_project:
warnings.warn(
"The project_id parameter has been deprecated. You should pass "
"the subscription_project parameter.", DeprecationWarning, stacklevel=2)
subscription_project_id = subscription_project
super().__init__(*args, **kwargs)
self.project_id = project_id
self.topic = topic
self.subscription = subscription
self.subscription_project_id = subscription_project_id
self.ack_deadline_secs = ack_deadline_secs
self.fail_if_exists = fail_if_exists
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.push_config = push_config
self.retain_acked_messages = retain_acked_messages
self.message_retention_duration = message_retention_duration
self.labels = labels
self.retry = retry
self.timeout = timeout
self.metadata = metadata
def execute(self, context):
hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
self.log.info("Creating subscription for topic %s", self.topic)
result = hook.create_subscription(
project_id=self.project_id,
topic=self.topic,
subscription=self.subscription,
subscription_project_id=self.subscription_project_id,
ack_deadline_secs=self.ack_deadline_secs,
fail_if_exists=self.fail_if_exists,
push_config=self.push_config,
retain_acked_messages=self.retain_acked_messages,
message_retention_duration=self.message_retention_duration,
labels=self.labels,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata
)
self.log.info("Created subscription for topic %s", self.topic)
return result
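# Illustrative usage sketch (not part of the original module; identifiers are
# hypothetical). When ``subscription`` is omitted a uuid-based name is
# generated, and because execute() returns it the name is available to
# downstream tasks via XCom (with Airflow's default XCom push behaviour).
#
#   create_sub = PubSubCreateSubscriptionOperator(
#       task_id='create-subscription',
#       topic='my-topic',
#       ack_deadline_secs=20,
#   )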
class PubSubDeleteTopicOperator(BaseOperator):
"""Delete a PubSub topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubDeleteTopicOperator`
By default, if the topic does not exist, this operator will
not cause the DAG to fail. ::
with DAG('successful DAG') as dag:
(
dag
>> PubSubTopicDeleteOperator(project='my-project',
topic='non_existing_topic')
)
The operator can be configured to fail if the topic does not exist. ::
with DAG('failing DAG') as dag:
(
dag
            >> PubSubTopicDeleteOperator(project='my-project',
topic='non_existing_topic',
fail_if_not_exists=True)
)
Both ``project`` and ``topic`` are templated so you can use
variables in them.
:param project_id: Optional, the GCP project ID in which to work (templated).
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param topic: the topic to delete. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:type topic: str
:param fail_if_not_exists: If True and the topic does not exist, fail
the task
:type fail_if_not_exists: bool
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request
must have domain-wide delegation enabled.
:type delegate_to: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param project: (Deprecated) the GCP project ID where the topic will be created
:type project: str
"""
template_fields = ['project_id', 'topic']
ui_color = '#cb4335'
@apply_defaults
def __init__(
self,
topic: str,
project_id: Optional[str] = None,
fail_if_not_exists: bool = False,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
project: Optional[str] = None,
*args,
**kwargs) -> None:
# To preserve backward compatibility
# TODO: remove one day
if project:
warnings.warn(
"The project parameter has been deprecated. You should pass "
"the project_id parameter.", DeprecationWarning, stacklevel=2)
project_id = project
super().__init__(*args, **kwargs)
self.project_id = project_id
self.topic = topic
self.fail_if_not_exists = fail_if_not_exists
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.retry = retry
self.timeout = timeout
self.metadata = metadata
def execute(self, context):
hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
self.log.info("Deleting topic %s", self.topic)
hook.delete_topic(
project_id=self.project_id,
topic=self.topic,
fail_if_not_exists=self.fail_if_not_exists,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata
)
self.log.info("Deleted topic %s", self.topic)
class PubSubDeleteSubscriptionOperator(BaseOperator):
"""Delete a PubSub subscription.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubDeleteSubscriptionOperator`
By default, if the subscription does not exist, this operator will
not cause the DAG to fail. ::
with DAG('successful DAG') as dag:
(
dag
>> PubSubSubscriptionDeleteOperator(project='my-project',
subscription='non-existing')
)
    The operator can be configured to fail if the subscription does not exist.
::
with DAG('failing DAG') as dag:
(
dag
>> PubSubSubscriptionDeleteOperator(
project='my-project', subscription='non-existing',
fail_if_not_exists=True)
)
    ``project`` and ``subscription`` are templated so you can use
variables in them.
:param project_id: Optional, the GCP project ID in which to work (templated).
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param subscription: the subscription to delete. Do not include the
full subscription path. In other words, instead of
``projects/{project}/subscription/{subscription}``, provide only
``{subscription}``. (templated)
:type subscription: str
:param fail_if_not_exists: If True and the subscription does not exist,
fail the task
:type fail_if_not_exists: bool
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request
must have domain-wide delegation enabled.
:type delegate_to: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
    :type metadata: Sequence[Tuple[str, str]]
:param project: (Deprecated) the GCP project ID where the topic will be created
:type project: str
"""
template_fields = ['project_id', 'subscription']
ui_color = '#cb4335'
@apply_defaults
def __init__(
self,
subscription: str,
project_id: Optional[str] = None,
fail_if_not_exists: bool = False,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
project: Optional[str] = None,
*args,
**kwargs) -> None:
# To preserve backward compatibility
# TODO: remove one day
if project:
warnings.warn(
"The project parameter has been deprecated. You should pass "
"the project_id parameter.", DeprecationWarning, stacklevel=2)
project_id = project
super().__init__(*args, **kwargs)
self.project_id = project_id
self.subscription = subscription
self.fail_if_not_exists = fail_if_not_exists
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.retry = retry
self.timeout = timeout
self.metadata = metadata
def execute(self, context):
hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
self.log.info("Deleting subscription %s", self.subscription)
hook.delete_subscription(
project_id=self.project_id,
subscription=self.subscription,
fail_if_not_exists=self.fail_if_not_exists,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata
)
self.log.info("Deleted subscription %s", self.subscription)
class PubSubPublishMessageOperator(BaseOperator):
"""Publish messages to a PubSub topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PubSubPublishMessageOperator`
Each Task publishes all provided messages to the same topic
in a single GCP project. If the topic does not exist, this
task will fail. ::
m1 = {'data': b'Hello, World!',
'attributes': {'type': 'greeting'}
}
m2 = {'data': b'Knock, knock'}
m3 = {'attributes': {'foo': ''}}
t1 = PubSubPublishOperator(
project='my-project',topic='my_topic',
messages=[m1, m2, m3],
create_topic=True,
dag=dag)
``project`` , ``topic``, and ``messages`` are templated so you can use
variables in them.
:param project_id: Optional, the GCP project ID in which to work (templated).
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param topic: the topic to which to publish. Do not include the
full topic path. In other words, instead of
``projects/{project}/topics/{topic}``, provide only
``{topic}``. (templated)
:type topic: str
:param messages: a list of messages to be published to the
topic. Each message is a dict with one or more of the
        following key-value mappings:
* 'data': a bytestring (utf-8 encoded)
* 'attributes': {'key1': 'value1', ...}
Each message must contain at least a non-empty 'data' value
or an attribute dict with at least one key (templated). See
https://cloud.google.com/pubsub/docs/reference/rest/v1/PubsubMessage
:type messages: list
:param gcp_conn_id: The connection ID to use connecting to
Google Cloud Platform.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request
must have domain-wide delegation enabled.
:type delegate_to: str
:param project: (Deprecated) the GCP project ID where the topic will be created
:type project: str
"""
template_fields = ['project_id', 'topic', 'messages']
ui_color = '#0273d4'
@apply_defaults
def __init__(
self,
topic: str,
messages: List,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
project: Optional[str] = None,
*args,
**kwargs) -> None:
# To preserve backward compatibility
# TODO: remove one day
if project:
warnings.warn(
"The project parameter has been deprecated. You should pass "
"the project_id parameter.", DeprecationWarning, stacklevel=2)
project_id = project
super().__init__(*args, **kwargs)
self.project_id = project_id
self.topic = topic
self.messages = messages
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
def execute(self, context):
hook = PubSubHook(gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
self.log.info("Publishing to topic %s", self.topic)
hook.publish(project_id=self.project_id, topic=self.topic, messages=self.messages)
self.log.info("Published to topic %s", self.topic)
|
spektom/incubator-airflow
|
airflow/providers/google/cloud/operators/pubsub.py
|
Python
|
apache-2.0
| 28,015
|
from __future__ import absolute_import
input_name = '../examples/dg/advection_2D.py'
output_name = 'advection_sol.msh'
from tests_basic import TestInput
class Test( TestInput ):
pass
|
vlukes/sfepy
|
tests/test_dg_input_advection.py
|
Python
|
bsd-3-clause
| 187
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('demo_models', '0003_auto_20150419_2110'),
]
operations = [
migrations.AddField(
model_name='waiter',
name='days_worked',
field=models.IntegerField(default=None, null=True, blank=True),
),
]
|
altanawealth/django-report-builder
|
report_builder_demo/demo_models/migrations/0004_waiter_days_worked.py
|
Python
|
bsd-3-clause
| 434
|
import shutil
from io import BytesIO, TextIOWrapper
import logging
import os
from os.path import expanduser, exists
import struct
from configobj import ConfigObj
from Crypto.Cipher import AES
logger = logging.getLogger(__name__)
def load_config(usr_cfg, def_cfg=None):
cfg = ConfigObj()
cfg.merge(ConfigObj(def_cfg, interpolation=False))
cfg.merge(ConfigObj(expanduser(usr_cfg), interpolation=False))
cfg.filename = expanduser(usr_cfg)
return cfg
def write_default_config(source, destination, overwrite=False):
destination = expanduser(destination)
if not overwrite and exists(destination):
return
shutil.copyfile(source, destination)
def get_mylogin_cnf_path():
"""Return the path to the .mylogin.cnf file or None if doesn't exist."""
app_data = os.getenv('APPDATA')
if app_data is None:
mylogin_cnf_dir = os.path.expanduser('~')
else:
mylogin_cnf_dir = os.path.join(app_data, 'MySQL')
mylogin_cnf_dir = os.path.abspath(mylogin_cnf_dir)
mylogin_cnf_path = os.path.join(mylogin_cnf_dir, '.mylogin.cnf')
if exists(mylogin_cnf_path):
logger.debug("Found login path file at '{0}'".format(mylogin_cnf_path))
return mylogin_cnf_path
return None
def open_mylogin_cnf(name):
"""Open a readable version of .mylogin.cnf.
Returns the file contents as a TextIOWrapper object.
:param str name: The pathname of the file to be opened.
:return: the login path file or None
"""
try:
with open(name, 'rb') as f:
plaintext = read_and_decrypt_mylogin_cnf(f)
except (OSError, IOError):
logger.error('Unable to open login path file.')
return None
if not isinstance(plaintext, BytesIO):
logger.error('Unable to read login path file.')
return None
return TextIOWrapper(plaintext)
def read_and_decrypt_mylogin_cnf(f):
"""Read and decrypt the contents of .mylogin.cnf.
This decryption algorithm mimics the code in MySQL's
mysql_config_editor.cc.
The login key is 20-bytes of random non-printable ASCII.
It is written to the actual login path file. It is used
to generate the real key used in the AES cipher.
:param f: an I/O object opened in binary mode
:return: the decrypted login path file
:rtype: io.BytesIO or None
"""
# Number of bytes used to store the length of ciphertext.
MAX_CIPHER_STORE_LEN = 4
LOGIN_KEY_LEN = 20
# Move past the unused buffer.
buf = f.read(4)
if not buf or len(buf) != 4:
logger.error('Login path file is blank or incomplete.')
return None
# Read the login key.
key = f.read(LOGIN_KEY_LEN)
# Generate the real key.
rkey = [0] * 16
for i in range(LOGIN_KEY_LEN):
try:
rkey[i % 16] ^= ord(key[i:i+1])
except TypeError:
# ord() was unable to get the value of the byte.
logger.error('Unable to generate login path AES key.')
return None
rkey = struct.pack('16B', *rkey)
# Create a cipher object using the key.
aes_cipher = AES.new(rkey, AES.MODE_ECB)
# Create a bytes buffer to hold the plaintext.
plaintext = BytesIO()
while True:
# Read the length of the ciphertext.
len_buf = f.read(MAX_CIPHER_STORE_LEN)
if len(len_buf) < MAX_CIPHER_STORE_LEN:
break
cipher_len, = struct.unpack("<i", len_buf)
# Read cipher_len bytes from the file and decrypt.
cipher = f.read(cipher_len)
pplain = aes_cipher.decrypt(cipher)
try:
# Determine pad length.
pad_len = ord(pplain[-1:])
except TypeError:
# ord() was unable to get the value of the byte.
logger.warning('Unable to remove pad.')
continue
if pad_len > len(pplain) or len(set(pplain[-pad_len:])) != 1:
# Pad length should be less than or equal to the length of the
            # plaintext. The pad should have a single unique byte.
logger.warning('Invalid pad found in login path file.')
continue
# Get rid of pad.
plain = pplain[:-pad_len]
plaintext.write(plain)
if plaintext.tell() == 0:
logger.error('No data successfully decrypted from login path file.')
return None
plaintext.seek(0)
return plaintext
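# Illustrative usage sketch (not part of the original module): locate the
# obfuscated login path file, decrypt it, and hand the plaintext INI text to
# ConfigObj.
#
#   path = get_mylogin_cnf_path()
#   if path:
#       cnf = open_mylogin_cnf(path)       # TextIOWrapper over decrypted text
#       if cnf is not None:
#           login_cfg = ConfigObj(cnf)     # e.g. login_cfg.get('client', {})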
|
danieljwest/mycli
|
mycli/config.py
|
Python
|
bsd-3-clause
| 4,404
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ConservationFeature'
db.create_table('seak_conservationfeature', (
('name', self.gf('django.db.models.fields.CharField')(max_length=99)),
('level1', self.gf('django.db.models.fields.CharField')(max_length=99)),
('level2', self.gf('django.db.models.fields.CharField')(max_length=99, null=True, blank=True)),
('level3', self.gf('django.db.models.fields.CharField')(max_length=99, null=True, blank=True)),
('level4', self.gf('django.db.models.fields.CharField')(max_length=99, null=True, blank=True)),
('level5', self.gf('django.db.models.fields.CharField')(max_length=99, null=True, blank=True)),
('dbf_fieldname', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
('units', self.gf('django.db.models.fields.CharField')(max_length=90, null=True, blank=True)),
('uid', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
))
db.send_create_signal('seak', ['ConservationFeature'])
# Adding model 'Cost'
db.create_table('seak_cost', (
('name', self.gf('django.db.models.fields.CharField')(max_length=99)),
('uid', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('dbf_fieldname', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
('units', self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True)),
('desc', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('seak', ['Cost'])
# Adding model 'PlanningUnit'
db.create_table('seak_planningunit', (
('fid', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=99)),
('geometry', self.gf('django.contrib.gis.db.models.fields.MultiPolygonField')(srid=3857, null=True, blank=True)),
('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('seak', ['PlanningUnit'])
# Adding model 'PuVsCf'
db.create_table('seak_puvscf', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pu', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seak.PlanningUnit'])),
('cf', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seak.ConservationFeature'])),
('amount', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
))
db.send_create_signal('seak', ['PuVsCf'])
# Adding unique constraint on 'PuVsCf', fields ['pu', 'cf']
db.create_unique('seak_puvscf', ['pu_id', 'cf_id'])
# Adding model 'PuVsCost'
db.create_table('seak_puvscost', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pu', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seak.PlanningUnit'])),
('cost', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seak.Cost'])),
('amount', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
))
db.send_create_signal('seak', ['PuVsCost'])
# Adding unique constraint on 'PuVsCost', fields ['pu', 'cost']
db.create_unique('seak_puvscost', ['pu_id', 'cost_id'])
# Adding model 'Scenario'
db.create_table('seak_scenario', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='seak_scenario_related', to=orm['auth.User'])),
('name', self.gf('django.db.models.fields.CharField')(max_length='255')),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='seak_scenario_related', null=True, to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('input_targets', self.gf('seak.models.JSONField')()),
('input_penalties', self.gf('seak.models.JSONField')()),
('input_relativecosts', self.gf('seak.models.JSONField')()),
('input_geography', self.gf('seak.models.JSONField')()),
('input_scalefactor', self.gf('django.db.models.fields.FloatField')(default=0.0)),
('description', self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True)),
('output_best', self.gf('seak.models.JSONField')(null=True, blank=True)),
('output_pu_count', self.gf('seak.models.JSONField')(null=True, blank=True)),
))
db.send_create_signal('seak', ['Scenario'])
# Adding M2M table for field sharing_groups on 'Scenario'
db.create_table('seak_scenario_sharing_groups', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('scenario', models.ForeignKey(orm['seak.scenario'], null=False)),
('group', models.ForeignKey(orm['auth.group'], null=False))
))
db.create_unique('seak_scenario_sharing_groups', ['scenario_id', 'group_id'])
# Adding model 'Folder'
db.create_table('seak_folder', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='seak_folder_related', to=orm['auth.User'])),
('name', self.gf('django.db.models.fields.CharField')(max_length='255')),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='seak_folder_related', null=True, to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True)),
))
db.send_create_signal('seak', ['Folder'])
# Adding M2M table for field sharing_groups on 'Folder'
db.create_table('seak_folder_sharing_groups', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('folder', models.ForeignKey(orm['seak.folder'], null=False)),
('group', models.ForeignKey(orm['auth.group'], null=False))
))
db.create_unique('seak_folder_sharing_groups', ['folder_id', 'group_id'])
# Adding model 'PlanningUnitShapes'
db.create_table('seak_planningunitshapes', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pu', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['seak.PlanningUnit'])),
('stamp', self.gf('django.db.models.fields.FloatField')()),
('bests', self.gf('django.db.models.fields.IntegerField')(default=0)),
('hits', self.gf('django.db.models.fields.IntegerField')(default=0)),
('fid', self.gf('django.db.models.fields.IntegerField')(null=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=99, null=True)),
('geometry', self.gf('django.contrib.gis.db.models.fields.MultiPolygonField')(srid=3857, null=True, blank=True)),
))
db.send_create_signal('seak', ['PlanningUnitShapes'])
def backwards(self, orm):
# Removing unique constraint on 'PuVsCost', fields ['pu', 'cost']
db.delete_unique('seak_puvscost', ['pu_id', 'cost_id'])
# Removing unique constraint on 'PuVsCf', fields ['pu', 'cf']
db.delete_unique('seak_puvscf', ['pu_id', 'cf_id'])
# Deleting model 'ConservationFeature'
db.delete_table('seak_conservationfeature')
# Deleting model 'Cost'
db.delete_table('seak_cost')
# Deleting model 'PlanningUnit'
db.delete_table('seak_planningunit')
# Deleting model 'PuVsCf'
db.delete_table('seak_puvscf')
# Deleting model 'PuVsCost'
db.delete_table('seak_puvscost')
# Deleting model 'Scenario'
db.delete_table('seak_scenario')
# Removing M2M table for field sharing_groups on 'Scenario'
db.delete_table('seak_scenario_sharing_groups')
# Deleting model 'Folder'
db.delete_table('seak_folder')
# Removing M2M table for field sharing_groups on 'Folder'
db.delete_table('seak_folder_sharing_groups')
# Deleting model 'PlanningUnitShapes'
db.delete_table('seak_planningunitshapes')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 18, 8, 47, 45, 101970)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 7, 18, 8, 47, 45, 101877)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'seak.conservationfeature': {
'Meta': {'object_name': 'ConservationFeature'},
'dbf_fieldname': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'level1': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'level2': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level3': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level4': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'level5': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'uid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '90', 'null': 'True', 'blank': 'True'})
},
'seak.cost': {
'Meta': {'object_name': 'Cost'},
'dbf_fieldname': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'}),
'uid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'seak.folder': {
'Meta': {'object_name': 'Folder'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'seak_folder_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'seak_folder_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seak_folder_related'", 'to': "orm['auth.User']"})
},
'seak.planningunit': {
'Meta': {'object_name': 'PlanningUnit'},
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'fid': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99'})
},
'seak.planningunitshapes': {
'Meta': {'object_name': 'PlanningUnitShapes'},
'bests': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'fid': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'hits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '99', 'null': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"}),
'stamp': ('django.db.models.fields.FloatField', [], {})
},
'seak.puvscf': {
'Meta': {'unique_together': "(('pu', 'cf'),)", 'object_name': 'PuVsCf'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cf': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.ConservationFeature']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"})
},
'seak.puvscost': {
'Meta': {'unique_together': "(('pu', 'cost'),)", 'object_name': 'PuVsCost'},
'amount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cost': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.Cost']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['seak.PlanningUnit']"})
},
'seak.scenario': {
'Meta': {'object_name': 'Scenario'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'seak_scenario_related'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'input_geography': ('seak.models.JSONField', [], {}),
'input_penalties': ('seak.models.JSONField', [], {}),
'input_relativecosts': ('seak.models.JSONField', [], {}),
'input_scalefactor': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'input_targets': ('seak.models.JSONField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'output_best': ('seak.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
'output_pu_count': ('seak.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
'sharing_groups': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'seak_scenario_related'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'seak_scenario_related'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['seak']
|
Ecotrust/cogs-priorities
|
priorities/seak/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 19,850
|
from __future__ import absolute_import, unicode_literals
import unittest
from django.test import TestCase
from wagtail.tests.search import models
from .test_backends import BackendTests
class TestDBBackend(BackendTests, TestCase):
backend_path = 'wagtail.wagtailsearch.backends.db'
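    # These inherited backend tests are known not to pass against the plain
    # database backend, so they are marked as expected failures.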
@unittest.expectedFailure
def test_callable_indexed_field(self):
super(TestDBBackend, self).test_callable_indexed_field()
@unittest.expectedFailure
def test_update_index_command(self):
super(TestDBBackend, self).test_update_index_command()
def test_annotate_score(self):
results = self.backend.search("Hello", models.SearchTest).annotate_score('_score')
for result in results:
# DB backend doesn't do scoring, so annotate_score should just add None
self.assertIsNone(result._score)
|
chrxr/wagtail
|
wagtail/wagtailsearch/tests/test_db_backend.py
|
Python
|
bsd-3-clause
| 854
|
#!/usr/bin/python
# ------------------------------------------------------------------------------
# Copyright (C) 2012, Robert Johansson <rob@raditex.nu>, Raditex Control AB
# All rights reserved.
#
# This file is part of the rSCADA system.
#
# rSCADA
# http://www.rSCADA.se
# info@rscada.se
# ------------------------------------------------------------------------------
"""
Read CAN frame, parse into CANopen frame, and dump to STDOUT.
"""
from pycanopen import *
canopen = CANopen()
while True:
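    # Read the next CAN frame and try to parse it into a CANopen frame.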
canopen_frame = canopen.read_frame()
if canopen_frame:
        print(canopen_frame)
else:
print("CANopen Frame parse error")
|
luminize/libcanopen
|
python/examples/canopen-dump.py
|
Python
|
bsd-3-clause
| 649
|
# -*- coding: utf-8 -*-
#
# Anaconda documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 18 14:37:01 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, shutil
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# configuration required to import test modules
for path in ["../pyanaconda/isys/.libs", "../pyanaconda", "../tests", "../tests/lib", "../dracut", "../widgets"]:
sys.path.append(os.path.abspath(path))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
if not os.environ.get("READTHEDOCS") == "True":
extensions.extend(['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.inheritance_diagram', 'sphinx.ext.todo'])
shutil.copy2("../CONTRIBUTING.rst", "contributing.rst")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Anaconda'
copyright = '2015, Red Hat, Inc.' # pylint: disable=redefined-builtin
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
def read_version():
""" Read version from ../configure.ac"""
import re
version_re = re.compile(r"AC_INIT\(\[(.*)\], \[(.*)\], \[(.*)\]\)")
with open("../configure.ac", "r") as f:
for line in f:
m = version_re.match(line)
if m:
return m.group(2)
# The short X.Y version.
version = read_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'html']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
autoclass_content = 'both'
# Inheritence diagram graphviz settings
inheritance_graph_attrs = dict(rankdir="UD", fontsize=14, ratio='auto')
inheritance_node_attrs = dict(style='rounded', margin='"0.07, 0.07"')
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Anacondadoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Anaconda.tex', 'Anaconda Documentation',
'Anaconda Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'Anaconda', 'Anaconda Documentation',
['Anaconda Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Anaconda', 'Anaconda Documentation',
'Anaconda Team', 'Anaconda', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'Anaconda'
epub_author = 'Anaconda Team'
epub_publisher = 'Anaconda Team'
epub_copyright = '2015, Anaconda Team'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/3': None}
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme # pylint: disable=import-error
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Group by class
autodoc_member_order = 'source'
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# This was taken directly from here:
# http://read-the-docs.readthedocs.org/en/latest/faq.html#i-get-import-errors-on-libraries-that-depend-on-c-modules
# I only added the __getitem__ method.
# NOTE: this can be removed whenever we move to sphinx-1.3, at which point we'll
# be able to use autodoc_mock_imports (value is a list of modules to be
# mocked).
class Mock(object):
__all__ = []
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
@classmethod
def __getitem__(cls, key):
return cls.__getattr__(key)
MOCK_MODULES = ['_isys']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
|
jkonecny12/anaconda
|
docs/conf.py
|
Python
|
gpl-2.0
| 11,653
|
"""
Docstrings are another source of information for functions and classes.
:mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while
the docstring parsing is much easier. There are three different types of
docstrings that |jedi| understands:
- `Sphinx <http://sphinx-doc.org/markup/desc.html#info-field-lists>`_
- `Epydoc <http://epydoc.sourceforge.net/manual-fields.html>`_
- `Numpydoc <https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_
For example, the sphinx annotation ``:type foo: str`` clearly states that the
type of ``foo`` is ``str``.
As an addition to parameter searching, this module also provides return
annotations.
"""
import re
from textwrap import dedent
from parso import parse, ParserSyntaxError
from jedi._compatibility import u
from jedi.evaluate.utils import indent_block
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate.base_context import iterator_to_context_set, ContextSet, \
NO_CONTEXTS
from jedi.evaluate.lazy_context import LazyKnownContexts
DOCSTRING_PARAM_PATTERNS = [
r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx
r'\s*:param\s+(\w+)\s+%s:[^\n]*', # Sphinx param with type
r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc
]
DOCSTRING_RETURN_PATTERNS = [
re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx
re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc
]
REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`')
_numpy_doc_string_cache = None
def _get_numpy_doc_string_cls():
global _numpy_doc_string_cache
try:
from numpydoc.docscrape import NumpyDocString
_numpy_doc_string_cache = NumpyDocString
except ImportError as e:
_numpy_doc_string_cache = e
if isinstance(_numpy_doc_string_cache, ImportError):
raise _numpy_doc_string_cache
return _numpy_doc_string_cache
def _search_param_in_numpydocstr(docstr, param_str):
"""Search `docstr` (in numpydoc format) for type(-s) of `param_str`."""
try:
# This is a non-public API. If it ever changes we should be
# prepared and return gracefully.
params = _get_numpy_doc_string_cls()(docstr)._parsed_data['Parameters']
except (KeyError, AttributeError, ImportError):
return []
for p_name, p_type, p_descr in params:
if p_name == param_str:
m = re.match('([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type)
if m:
p_type = m.group(1)
return list(_expand_typestr(p_type))
return []
def _search_return_in_numpydocstr(docstr):
"""
Search `docstr` (in numpydoc format) for type(-s) of function returns.
"""
try:
doc = _get_numpy_doc_string_cls()(docstr)
except ImportError:
return
try:
# This is a non-public API. If it ever changes we should be
# prepared and return gracefully.
returns = doc._parsed_data['Returns']
returns += doc._parsed_data['Yields']
except (KeyError, AttributeError):
return
for r_name, r_type, r_descr in returns:
# Return names are optional and if so the type is in the name
if not r_type:
r_type = r_name
for type_ in _expand_typestr(r_type):
yield type_
def _expand_typestr(type_str):
"""
Attempts to interpret the possible types in `type_str`
"""
# Check if alternative types are specified with 'or'
if re.search('\\bor\\b', type_str):
for t in type_str.split('or'):
yield t.split('of')[0].strip()
# Check if like "list of `type`" and set type to list
elif re.search('\\bof\\b', type_str):
yield type_str.split('of')[0]
# Check if type has is a set of valid literal values eg: {'C', 'F', 'A'}
elif type_str.startswith('{'):
node = parse(type_str, version='3.6').children[0]
if node.type == 'atom':
for leaf in node.children[1].children:
if leaf.type == 'number':
if '.' in leaf.value:
yield 'float'
else:
yield 'int'
elif leaf.type == 'string':
if 'b' in leaf.string_prefix.lower():
yield 'bytes'
else:
yield 'str'
# Ignore everything else.
# Otherwise just work with what we have.
else:
yield type_str
def _search_param_in_docstr(docstr, param_str):
"""
Search `docstr` for type(-s) of `param_str`.
>>> _search_param_in_docstr(':type param: int', 'param')
['int']
>>> _search_param_in_docstr('@type param: int', 'param')
['int']
>>> _search_param_in_docstr(
... ':type param: :class:`threading.Thread`', 'param')
['threading.Thread']
>>> bool(_search_param_in_docstr('no document', 'param'))
False
>>> _search_param_in_docstr(':param int param: some description', 'param')
['int']
"""
# look at #40 to see definitions of those params
patterns = [re.compile(p % re.escape(param_str))
for p in DOCSTRING_PARAM_PATTERNS]
for pattern in patterns:
match = pattern.search(docstr)
if match:
return [_strip_rst_role(match.group(1))]
return _search_param_in_numpydocstr(docstr, param_str)
def _strip_rst_role(type_str):
"""
Strip off the part looks like a ReST role in `type_str`.
>>> _strip_rst_role(':class:`ClassName`') # strip off :class:
'ClassName'
>>> _strip_rst_role(':py:obj:`module.Object`') # works with domain
'module.Object'
>>> _strip_rst_role('ClassName') # do nothing when not ReST role
'ClassName'
See also:
http://sphinx-doc.org/domains.html#cross-referencing-python-objects
"""
match = REST_ROLE_PATTERN.match(type_str)
if match:
return match.group(1)
else:
return type_str
def _evaluate_for_statement_string(module_context, string):
code = dedent(u("""
def pseudo_docstring_stuff():
'''
Create a pseudo function for docstring statements.
Need this docstring so that if the below part is not valid Python this
is still a function.
'''
{}
"""))
if string is None:
return []
    for element in re.findall(r'((?:\w+\.)*\w+)\.', string):
# Try to import module part in dotted name.
# (e.g., 'threading' in 'threading.Thread').
string = 'import %s\n' % element + string
# Take the default grammar here, if we load the Python 2.7 grammar here, it
# will be impossible to use `...` (Ellipsis) as a token. Docstring types
# don't need to conform with the current grammar.
grammar = module_context.evaluator.latest_grammar
try:
module = grammar.parse(code.format(indent_block(string)), error_recovery=False)
except ParserSyntaxError:
return []
try:
funcdef = next(module.iter_funcdefs())
# First pick suite, then simple_stmt and then the node,
# which is also not the last item, because there's a newline.
stmt = funcdef.children[-1].children[-1].children[-2]
except (AttributeError, IndexError):
return []
from jedi.evaluate.context import FunctionContext
function_context = FunctionContext(
module_context.evaluator,
module_context,
funcdef
)
func_execution_context = function_context.get_function_execution()
# Use the module of the param.
# TODO this module is not the module of the param in case of a function
# call. In that case it's the module of the function call.
# stuffed with content from a function call.
return list(_execute_types_in_stmt(func_execution_context, stmt))
def _execute_types_in_stmt(module_context, stmt):
"""
Executing all types or general elements that we find in a statement. This
doesn't include tuple, list and dict literals, because the stuff they
contain is executed. (Used as type information).
"""
definitions = module_context.eval_node(stmt)
return ContextSet.from_sets(
_execute_array_values(module_context.evaluator, d)
for d in definitions
)
def _execute_array_values(evaluator, array):
"""
Tuples indicate that there's not just one return value, but the listed
ones. `(str, int)` means that it returns a tuple with both types.
"""
from jedi.evaluate.context.iterable import SequenceLiteralContext, FakeSequence
if isinstance(array, SequenceLiteralContext):
values = []
for lazy_context in array.py__iter__():
objects = ContextSet.from_sets(
_execute_array_values(evaluator, typ)
for typ in lazy_context.infer()
)
values.append(LazyKnownContexts(objects))
return {FakeSequence(evaluator, array.array_type, values)}
else:
return array.execute_evaluated()
@evaluator_method_cache()
def infer_param(execution_context, param):
from jedi.evaluate.context.instance import AnonymousInstanceFunctionExecution
def eval_docstring(docstring):
return ContextSet.from_iterable(
p
for param_str in _search_param_in_docstr(docstring, param.name.value)
for p in _evaluate_for_statement_string(module_context, param_str)
)
module_context = execution_context.get_root_context()
func = param.get_parent_function()
if func.type == 'lambdef':
return NO_CONTEXTS
types = eval_docstring(execution_context.py__doc__())
if isinstance(execution_context, AnonymousInstanceFunctionExecution) and \
execution_context.function_context.name.string_name == '__init__':
class_context = execution_context.instance.class_context
types |= eval_docstring(class_context.py__doc__())
return types
@evaluator_method_cache()
@iterator_to_context_set
def infer_return_types(function_context):
def search_return_in_docstr(code):
for p in DOCSTRING_RETURN_PATTERNS:
match = p.search(code)
if match:
yield _strip_rst_role(match.group(1))
# Check for numpy style return hint
for type_ in _search_return_in_numpydocstr(code):
yield type_
for type_str in search_return_in_docstr(function_context.py__doc__()):
for type_eval in _evaluate_for_statement_string(function_context.get_root_context(), type_str):
yield type_eval
|
technologiescollege/Blockly-rduino-communication
|
scripts_XP/Lib/site-packages/jedi/evaluate/docstrings.py
|
Python
|
gpl-3.0
| 10,503
|
# -*- coding: utf-8 -*-
from module.plugins.internal.XFSAccount import XFSAccount
class FilerioCom(XFSAccount):
__name__ = "FilerioCom"
__type__ = "account"
__version__ = "0.07"
__status__ = "testing"
__description__ = """FileRio.in account plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
PLUGIN_DOMAIN = "filerio.in"
|
Guidobelix/pyload
|
module/plugins/accounts/FilerioCom.py
|
Python
|
gpl-3.0
| 407
|
# Albert Cardona 2014-11-20
# This file is meant to be run from within ./manage.py shell in the environment, like:
# [1] load export_all_graphml.py
# [2] project_id = 12
# [2] export(project_id, "all.graphml")
#
# Will generate a gzip'ed file like "all.graphml.gz"
#
# Includes all skeletons with more than 1 treenode;
# each skeleton is an undirected graph, where each treenode is a node
# (with the skeleton ID and the location as extra attributes)
# and each relationship between child and parent treenodes is an undirected edge
# that has the skeleton ID as an extra attribute.
# Each presynaptic+postsynaptic connection is a directed edge between treenodes;
# these directed edges also contain the skeleton ID of the pre- and the postsynaptic
# skeletons.
from __future__ import with_statement
from django.db import connection
from django.db import transaction
import gzip
import sys
def writeOneSkeleton(file, cursor, skid):
cursor.execute('''
select id, parent_id, location_x, location_y, location_z
from treenode
where skeleton_id=%s
''' % skid)
for row in cursor.fetchall():
file.write('''<node id="n%s">
<data key="skid">%s</data>
<data key="x">%s</data>
<data key="y">%s</data>
<data key="z">%s</data>
</node>\n''' % (row[0], skid, row[2], row[3], row[4]))
if row[1]:
file.write('<edge id="e%s" directed="false" source="n%s" target="n%s" />\n' % (row[0], row[0], row[1]))
@transaction.atomic
def export(project_id, filename):
project_id = int(project_id)
cursor = connection.cursor()
with gzip.open(filename + '.gz', 'w') as file:
file.write('''<?xml version="1.0" encoding="UTF-8"?>
<graphml xmlns="http://graphml.graphdrawing.org/xmlns"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd">
<key id="skid" for="node" attr.name="skeleton id" attr.type="long"/>
<key id="x" for="node" attr.name="x" attr.type="float"/>
<key id="y" for="node" attr.name="y" attr.type="float"/>
<key id="z" for="node" attr.name="z" attr.type="float"/>
<key id="pre_skid" for="edge" attr.name="presynaptic skeleton id" attr.type="long"/>
<key id="post_skid" for="edge" attr.name="postsynaptic skeleton id" attr.type="long"/>
<graph id="CNS">\n''')
#
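        # Collect all skeletons in this project that have more than one treenode.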
cursor.execute('''
select skeleton_id
from treenode
where project_id=%s
group by skeleton_id
having count(*) > 1
''' % project_id)
#
for row in cursor.fetchall():
print("Writing skeleton nodes for %s" % row[0])
writeOneSkeleton(file, cursor, row[0])
#
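        # Map relation names (e.g. presynaptic_to, postsynaptic_to) to their ids.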
cursor.execute('''
select relation_name, id from relation where project_id=%s
''' % project_id)
relations = dict(cursor.fetchall())
#
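        # Pair up pre- and postsynaptic treenodes that share a connector,
        # restricted to skeletons with more than one treenode.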
cursor.execute('''
select tc2.id, tc1.treenode_id, tc2.treenode_id,
tc1.skeleton_id, tc2.skeleton_id
from treenode_connector tc1,
treenode_connector tc2
where tc1.project_id=%s
and tc1.relation_id = %s
and tc2.relation_id = %s
and tc1.connector_id = tc2.connector_id
and tc1.skeleton_id IN (select skeleton_id from treenode where project_id=%s group by skeleton_id having count(*) > 1)
''' % (project_id, relations['presynaptic_to'], relations['postsynaptic_to'], project_id))
#
print("Writing synapses")
for row in cursor.fetchall():
file.write('<edge id="e%s" directed="true" source="n%s" target="n%s">\n<data key="pre_skid">%s</data>\n<data key="post_skid">%s</data>\n</edge>\n' % row)
#
file.write("</graph>\n</graphml>")
def run():
    if len(sys.argv) < 3:
        print("Need 2 arguments: <project id> <filename.graphml>")
    else:
        project_id = int(sys.argv[1])
        filename = sys.argv[2]
        export(project_id, filename)
|
catsop/CATMAID
|
scripts/export/export_all_graphml.py
|
Python
|
gpl-3.0
| 3,992
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_template
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower job template.
description:
- Create, update, or destroy Ansible Tower job templates. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the job template.
required: True
description:
description:
- Description to use for the job template.
job_type:
description:
- The job type to use for the job template.
required: True
choices: ["run", "check", "scan"]
inventory:
description:
- Name of the inventory to use for the job template.
project:
description:
- Name of the project to use for the job template.
required: True
playbook:
description:
- Path to the playbook to use for the job template within the project provided.
required: True
credential:
description:
- Name of the credential to use for the job template.
version_added: 2.7
vault_credential:
description:
- Name of the vault credential to use for the job template.
version_added: 2.7
forks:
description:
- The number of parallel or simultaneous processes to use while executing the playbook.
limit:
description:
- A host pattern to further constrain the list of hosts managed or affected by the playbook
verbosity:
description:
- Control the output level Ansible produces as the playbook runs. 0 - Normal, 1 - Verbose, 2 - More Verbose, 3 - Debug, 4 - Connection Debug.
choices: [0, 1, 2, 3, 4]
default: 0
extra_vars_path:
description:
- Path to the C(extra_vars) YAML file.
job_tags:
description:
- Comma separated list of the tags to use for the job template.
force_handlers_enabled:
description:
- Enable forcing playbook handlers to run even if a task fails.
version_added: 2.7
type: bool
default: 'no'
skip_tags:
description:
- Comma separated list of the tags to skip for the job template.
start_at_task:
description:
- Start the playbook at the task matching this name.
version_added: 2.7
fact_caching_enabled:
description:
- Enable use of fact caching for the job template.
version_added: 2.7
type: bool
default: 'no'
host_config_key:
description:
- Allow provisioning callbacks using this host config key.
ask_diff_mode:
description:
- Prompt user to enable diff mode (show changes) to files when supported by modules.
version_added: 2.7
type: bool
default: 'no'
ask_extra_vars:
description:
- Prompt user for (extra_vars) on launch.
type: bool
default: 'no'
ask_limit:
description:
- Prompt user for a limit on launch.
version_added: 2.7
type: bool
default: 'no'
ask_tags:
description:
- Prompt user for job tags on launch.
type: bool
default: 'no'
ask_skip_tags:
description:
- Prompt user for job tags to skip on launch.
version_added: 2.7
type: bool
default: 'no'
ask_job_type:
description:
- Prompt user for job type on launch.
type: bool
default: 'no'
ask_verbosity:
description:
- Prompt user to choose a verbosity level on launch.
version_added: 2.7
type: bool
default: 'no'
ask_inventory:
description:
        - Prompt user for inventory on launch.
type: bool
default: 'no'
ask_credential:
description:
- Prompt user for credential on launch.
type: bool
default: 'no'
survey_enabled:
description:
- Enable a survey on the job template.
version_added: 2.7
type: bool
default: 'no'
survey_spec:
description:
- JSON/YAML dict formatted survey definition.
version_added: 2.8
type: dict
required: False
become_enabled:
description:
- Activate privilege escalation.
type: bool
default: 'no'
concurrent_jobs_enabled:
description:
- Allow simultaneous runs of the job template.
version_added: 2.7
type: bool
default: 'no'
state:
description:
- Desired state of the resource.
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
notes:
- JSON for survey_spec can be found in Tower API Documentation. See
U(https://docs.ansible.com/ansible-tower/latest/html/towerapi/api_ref.html#/Job_Templates/Job_Templates_job_templates_survey_spec_create)
for POST operation payload example.
'''
EXAMPLES = '''
- name: Create tower Ping job template
tower_job_template:
name: "Ping"
job_type: "run"
inventory: "Local"
project: "Demo"
playbook: "ping.yml"
credential: "Local"
state: "present"
tower_config_file: "~/tower_cli.cfg"
survey_enabled: yes
survey_spec: "{{ lookup('file', 'my_survey.json') }}"
'''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def update_fields(p):
'''This updates the module field names
to match the field names tower-cli expects to make
calling of the modify/delete methods easier.
'''
params = p.copy()
field_map = {
'fact_caching_enabled': 'use_fact_cache',
'ask_diff_mode': 'ask_diff_mode_on_launch',
'ask_extra_vars': 'ask_variables_on_launch',
'ask_limit': 'ask_limit_on_launch',
'ask_tags': 'ask_tags_on_launch',
'ask_skip_tags': 'ask_skip_tags_on_launch',
'ask_verbosity': 'ask_verbosity_on_launch',
'ask_inventory': 'ask_inventory_on_launch',
'ask_credential': 'ask_credential_on_launch',
'ask_job_type': 'ask_job_type_on_launch',
'diff_mode_enabled': 'diff_mode',
'concurrent_jobs_enabled': 'allow_simultaneous',
'force_handlers_enabled': 'force_handlers',
}
params_update = {}
for old_k, new_k in field_map.items():
v = params.pop(old_k)
params_update[new_k] = v
extra_vars = params.get('extra_vars_path')
if extra_vars is not None:
params_update['extra_vars'] = ['@' + extra_vars]
params.update(params_update)
return params
def update_resources(module, p):
params = p.copy()
identity_map = {
'project': 'name',
'inventory': 'name',
'credential': 'name',
'vault_credential': 'name',
}
for k, v in identity_map.items():
try:
if params[k]:
key = 'credential' if '_credential' in k else k
result = tower_cli.get_resource(key).get(**{v: params[k]})
params[k] = result['id']
elif k in params:
# unset empty parameters to avoid ValueError: invalid literal for int() with base 10: ''
del(params[k])
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
return params
def main():
argument_spec = dict(
name=dict(required=True),
description=dict(default=''),
job_type=dict(choices=['run', 'check', 'scan'], required=True),
inventory=dict(default=''),
project=dict(required=True),
playbook=dict(required=True),
credential=dict(default=''),
vault_credential=dict(default=''),
forks=dict(type='int'),
limit=dict(default=''),
verbosity=dict(type='int', choices=[0, 1, 2, 3, 4], default=0),
extra_vars_path=dict(type='path', required=False),
job_tags=dict(default=''),
force_handlers_enabled=dict(type='bool', default=False),
skip_tags=dict(default=''),
start_at_task=dict(default=''),
timeout=dict(type='int', default=0),
fact_caching_enabled=dict(type='bool', default=False),
host_config_key=dict(default=''),
ask_diff_mode=dict(type='bool', default=False),
ask_extra_vars=dict(type='bool', default=False),
ask_limit=dict(type='bool', default=False),
ask_tags=dict(type='bool', default=False),
ask_skip_tags=dict(type='bool', default=False),
ask_job_type=dict(type='bool', default=False),
ask_verbosity=dict(type='bool', default=False),
ask_inventory=dict(type='bool', default=False),
ask_credential=dict(type='bool', default=False),
survey_enabled=dict(type='bool', default=False),
survey_spec=dict(type='dict', required=False),
become_enabled=dict(type='bool', default=False),
diff_mode_enabled=dict(type='bool', default=False),
concurrent_jobs_enabled=dict(type='bool', default=False),
state=dict(choices=['present', 'absent'], default='present'),
)
module = TowerModule(argument_spec=argument_spec, supports_check_mode=True)
name = module.params.get('name')
state = module.params.pop('state')
json_output = {'job_template': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
jt = tower_cli.get_resource('job_template')
params = update_resources(module, module.params)
params = update_fields(params)
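        # create_on_missing lets modify() create the job template when it does not exist yet.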
params['create_on_missing'] = True
try:
if state == 'present':
result = jt.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = jt.delete(**params)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
valentin-krasontovitsch/ansible
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_template.py
|
Python
|
gpl-3.0
| 10,716
|
# Copyright (c) 2011 Mattias Nissler <mattias.nissler@gmx.de>
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. The name of the author may not be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import itertools
import re
import sys
import unittest
from tuntap.packet_codec import TunPacketCodec, TunAFPacketCodec, TapPacketCodec
from tuntap.packet_reader import BlockingPacketSource, SelectPacketSource
from tuntap.test_char_dev import TestTunCharDev, TestTapCharDev
from tuntap.test_interface import TestTunInterface, TestTapInterface
from tuntap.test_ip import TestIp, TestIp6, TestMulticast, TestMulticast6
class FilteringTestSuite(unittest.TestSuite):
def __init__(self, filter):
super(FilteringTestSuite, self).__init__()
self._matcher = re.compile(filter or '.*')
def __iter__(self):
return itertools.ifilter(lambda test : self._matcher.search(str(test)),
super(FilteringTestSuite, self).__iter__())
def loadTestsFromTestCase(testCaseClass, *args, **kwargs):
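    # Build a suite from all test_* methods, forwarding extra constructor arguments.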
testCaseNames = unittest.getTestCaseNames(testCaseClass, 'test_')
return unittest.TestSuite(map(lambda n : testCaseClass(n, *args, **kwargs), testCaseNames))
def main(argv):
# Parse the command line.
parser = argparse.ArgumentParser(description = 'Run tuntap unit tests.')
parser.add_argument('--tests', type = str, nargs = '?', default = None,
help = 'tests to run')
parser.add_argument('--verbosity', type = int, nargs = '?', default = 2,
help = 'verbosity level')
options = parser.parse_args(argv[1:])
# Gather tests and run them.
loader = unittest.TestLoader()
suite = FilteringTestSuite(options.tests)
suite.addTests(loadTestsFromTestCase(TestTunCharDev))
suite.addTests(loadTestsFromTestCase(TestTapCharDev))
suite.addTests(loadTestsFromTestCase(TestTunInterface))
suite.addTests(loadTestsFromTestCase(TestTapInterface))
codecs = (TunPacketCodec, TunAFPacketCodec, TapPacketCodec)
sources = (SelectPacketSource, BlockingPacketSource)
tests = (TestIp, TestIp6, TestMulticast, TestMulticast6)
for (test, codec, source) in [ (test, codec, source) for test in tests
for codec in codecs
for source in sources ]:
        suite.addTests(loadTestsFromTestCase(test, lambda af, addr, codec=codec, source=source: codec(af, addr, source)))
runner = unittest.TextTestRunner(stream = sys.stderr,
descriptions = True,
verbosity = options.verbosity)
runner.run(suite)
if __name__ == '__main__':
main(sys.argv)
|
shutej/tapcfg
|
drivers/osx/tuntap/test/tuntap/tuntap_tests.py
|
Python
|
lgpl-2.1
| 4,009
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyFlaskSocketio(PythonPackage):
"""Flask-SocketIO gives Flask applications access to low latency
bi-directional communications between the clients and the server.
The client-side application can use any of the SocketIO official clients
libraries in Javascript, C++, Java and Swift, or any compatible client to
establish a permanent connection to the server.
"""
homepage = "https://flask-socketio.readthedocs.io"
url = "https://pypi.io/packages/source/F/Flask-SocketIO/Flask-SocketIO-2.9.6.tar.gz"
version('2.9.6', 'bca83faf38355bd91911f2f140f9b50f')
depends_on('py-setuptools', type='build')
depends_on('py-flask@0.9:', type=('build', 'run'))
depends_on('py-python-socketio@1.6.1:', type=('build', 'run'))
depends_on('py-werkzeug', type=('build', 'run'))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/py-flask-socketio/package.py
|
Python
|
lgpl-2.1
| 2,117
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Prodigal(MakefilePackage):
"""Fast, reliable protein-coding gene prediction for prokaryotic
genomes."""
homepage = "https://github.com/hyattpd/Prodigal"
url = "https://github.com/hyattpd/Prodigal/archive/v2.6.3.tar.gz"
version('2.6.3', '5181809fdb740e9a675cfdbb6c038466')
def install(self, spec, prefix):
make('INSTALLDIR={0}'.format(self.prefix), 'install')
def setup_environment(self, spack_env, run_env):
        run_env.prepend_path('PATH', self.prefix)
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/prodigal/package.py
|
Python
|
lgpl-2.1
| 1,761
|
# -*- coding: utf-8 -*-
import os
import robot
from keywordgroup import KeywordGroup
class _ScreenshotKeywords(KeywordGroup):
def __init__(self):
self._screenshot_index = 0
# Public
def capture_page_screenshot(self, filename=None):
"""Takes a screenshot of the current page and embeds it into the log.
`filename` argument specifies the name of the file to write the
screenshot into. If no `filename` is given, the screenshot is saved into file
`appium-screenshot-<counter>.png` under the directory where
the Robot Framework log file is written into. The `filename` is
also considered relative to the same directory, if it is not
given in absolute format.
"""
path, link = self._get_screenshot_paths(filename)
if hasattr(self._current_application(), 'get_screenshot_as_file'):
self._current_application().get_screenshot_as_file(path)
else:
self._current_application().save_screenshot(path)
# Image is shown on its own row and thus prev row is closed on purpose
self._html('</td></tr><tr><td colspan="3"><a href="%s">'
'<img src="%s" width="800px"></a>' % (link, link))
# Private
def _get_screenshot_paths(self, filename):
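        # Default to an auto-numbered filename when none is given, then resolve the
        # path relative to the Robot Framework log directory.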
if not filename:
self._screenshot_index += 1
filename = 'appium-screenshot-%d.png' % self._screenshot_index
else:
filename = filename.replace('/', os.sep)
logdir = self._get_log_dir()
path = os.path.join(logdir, filename)
link = robot.utils.get_link_path(path, logdir)
return path, link
|
michaelmendoza42/robotframework-appiumlibrary
|
src/AppiumLibrary/keywords/_screenshot.py
|
Python
|
apache-2.0
| 1,938
|
# Import all metrics
from django_prometheus.db.metrics import *
|
DingaGa/django-prometheus
|
django_prometheus/db/__init__.py
|
Python
|
apache-2.0
| 64
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n.filters import CrossAccountAccessFilter
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.utils import local_session
@resources.register('sns')
class SNS(QueryResourceManager):
resource_type = 'aws.sns.topic'
def augment(self, resources):
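        # Fetch each topic's attributes from the SNS API and merge them into the resource dict.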
def _augment(r):
client = local_session(self.session_factory).client('sns')
attrs = client.get_topic_attributes(
TopicArn=r['TopicArn'])['Attributes']
r.update(attrs)
return r
self.log.debug("retrieving details for %d topics" % len(resources))
with self.executor_factory(max_workers=4) as w:
return list(w.map(_augment, resources))
SNS.filter_registry.register('cross-account', CrossAccountAccessFilter)
|
gwh59/cloud-custodian
|
c7n/resources/sns.py
|
Python
|
apache-2.0
| 1,389
|
"""Test sensor of Brother integration."""
from datetime import datetime, timedelta
import json
from homeassistant.components.brother.const import UNIT_PAGES
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_TIMESTAMP,
PERCENTAGE,
STATE_UNAVAILABLE,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import UTC, utcnow
from tests.async_mock import patch
from tests.common import async_fire_time_changed, load_fixture
from tests.components.brother import init_integration
ATTR_REMAINING_PAGES = "remaining_pages"
ATTR_COUNTER = "counter"
async def test_sensors(hass):
"""Test states of the sensors."""
test_time = datetime(2019, 11, 11, 9, 10, 32, tzinfo=UTC)
with patch(
"homeassistant.components.brother.sensor.utcnow", return_value=test_time
):
await init_integration(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer"
assert state.state == "waiting"
entry = registry.async_get("sensor.hl_l2340dw_status")
assert entry
assert entry.unique_id == "0123456789_status"
state = hass.states.get("sensor.hl_l2340dw_black_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "75"
entry = registry.async_get("sensor.hl_l2340dw_black_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_black_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_cyan_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "10"
entry = registry.async_get("sensor.hl_l2340dw_cyan_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_cyan_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_magenta_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "8"
entry = registry.async_get("sensor.hl_l2340dw_magenta_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_magenta_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_yellow_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "2"
entry = registry.async_get("sensor.hl_l2340dw_yellow_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_yellow_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 11014
assert state.attributes.get(ATTR_COUNTER) == 986
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_black_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_black_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_black_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_cyan_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_cyan_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_cyan_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_magenta_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_magenta_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_magenta_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_yellow_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
entry = registry.async_get("sensor.hl_l2340dw_yellow_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_yellow_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_fuser_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:water-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "97"
entry = registry.async_get("sensor.hl_l2340dw_fuser_remaining_life")
assert entry
assert entry.unique_id == "0123456789_fuser_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_belt_unit_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:current-ac"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "97"
entry = registry.async_get("sensor.hl_l2340dw_belt_unit_remaining_life")
assert entry
assert entry.unique_id == "0123456789_belt_unit_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_pf_kit_1_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "98"
entry = registry.async_get("sensor.hl_l2340dw_pf_kit_1_remaining_life")
assert entry
assert entry.unique_id == "0123456789_pf_kit_1_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_page_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "986"
entry = registry.async_get("sensor.hl_l2340dw_page_counter")
assert entry
assert entry.unique_id == "0123456789_page_counter"
state = hass.states.get("sensor.hl_l2340dw_duplex_unit_pages_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "538"
entry = registry.async_get("sensor.hl_l2340dw_duplex_unit_pages_counter")
assert entry
assert entry.unique_id == "0123456789_duplex_unit_pages_counter"
state = hass.states.get("sensor.hl_l2340dw_b_w_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "709"
entry = registry.async_get("sensor.hl_l2340dw_b_w_counter")
assert entry
assert entry.unique_id == "0123456789_b/w_counter"
state = hass.states.get("sensor.hl_l2340dw_color_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "902"
entry = registry.async_get("sensor.hl_l2340dw_color_counter")
assert entry
assert entry.unique_id == "0123456789_color_counter"
state = hass.states.get("sensor.hl_l2340dw_uptime")
assert state
assert state.attributes.get(ATTR_ICON) is None
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TIMESTAMP
assert state.state == "2019-09-24T12:14:56+00:00"
entry = registry.async_get("sensor.hl_l2340dw_uptime")
assert entry
assert entry.unique_id == "0123456789_uptime"
async def test_availability(hass):
"""Ensure that we mark the entities unavailable correctly when device is offline."""
await init_integration(hass)
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "waiting"
future = utcnow() + timedelta(minutes=5)
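    # Simulate the device going offline: the next update raises ConnectionError.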
with patch("brother.Brother._get_data", side_effect=ConnectionError()):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.state == STATE_UNAVAILABLE
future = utcnow() + timedelta(minutes=10)
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("brother_printer_data.json")),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "waiting"
async def test_manual_update_entity(hass):
"""Test manual update entity via service homeasasistant/update_entity."""
await init_integration(hass)
await async_setup_component(hass, "homeassistant", {})
with patch("homeassistant.components.brother.Brother.async_update") as mock_update:
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["sensor.hl_l2340dw_status"]},
blocking=True,
)
assert len(mock_update.mock_calls) == 1
|
tchellomello/home-assistant
|
tests/components/brother/test_sensor.py
|
Python
|
apache-2.0
| 10,659
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Permission
from django.contrib.auth.forms import PasswordResetForm
from .models import MyUser
from .forms import CustomUserRegistrationForm
class PermissionAdmin(admin.ModelAdmin):
search_fields = ['name', 'codename']
class CustomUserAdmin(UserAdmin):
add_form = CustomUserRegistrationForm
list_display = ['email', 'first_name', 'last_name', 'is_active', 'confirmed', 'osf_id']
fieldsets = (
(None, {'fields': ('email', 'password',)}),
('Personal info', {'fields': ('first_name', 'last_name', 'email', 'date_joined', 'last_login', 'osf_id')}),
('Permissions', {'fields': ('is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions',)}),
)
add_fieldsets = (
(None, {'fields':
('email', 'first_name', 'last_name', 'password1', 'password2'),
}),)
search_fields = ('email', 'first_name', 'last_name',)
ordering = ('last_name', 'first_name',)
actions = ['send_email_invitation']
# TODO - include alternative messages for warning/failure
def send_email_invitation(self, request, queryset):
for user in queryset:
reset_form = PasswordResetForm({'email': user.email}, request.POST)
assert reset_form.is_valid()
reset_form.save(
#subject_template_name='templates/emails/account_creation_subject.txt',
#email_template_name='templates/emails/invitation_email.html',
request=request
)
self.message_user(request, 'Email invitation successfully sent')
send_email_invitation.short_description = 'Send email invitation to selected users'
def save_model(self, request, obj, form, change):
if change:
pass
else:
obj.is_active = False
obj.save()
admin.site.register(MyUser, CustomUserAdmin)
admin.site.register(Permission, PermissionAdmin)
|
brandonPurvis/osf.io
|
admin/common_auth/admin.py
|
Python
|
apache-2.0
| 2,035
|
import binascii
import glob
import os
import stat
import struct
import time
from distutils.version import LooseVersion
import pytest
import logging
from cassandra import WriteTimeout
from cassandra.cluster import NoHostAvailable, OperationTimedOut
from ccmlib.common import is_win
from ccmlib.node import Node, TimeoutError
from parse import parse
from dtest import Tester, create_ks
from tools.assertions import (assert_almost_equal, assert_none, assert_one, assert_lists_equal_ignoring_order)
from tools.data import rows_to_list
since = pytest.mark.since
logger = logging.getLogger(__name__)
class TestCommitLog(Tester):
"""
CommitLog Tests
"""
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.allow_log_errors = True
@pytest.fixture(scope='function', autouse=True)
def fixture_set_cluster_settings(self, fixture_dtest_setup):
if fixture_dtest_setup.dtest_config.cassandra_version_from_build >= '3.0':
fixture_dtest_setup.cluster.set_configuration_options({'enable_materialized_views': 'true'})
fixture_dtest_setup.cluster.populate(1)
[self.node1] = fixture_dtest_setup.cluster.nodelist()
yield
# Some of the tests change commitlog permissions to provoke failure
# so this changes them back so we can delete them.
self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
def prepare(self, configuration=None, create_test_keyspace=True, **kwargs):
if configuration is None:
configuration = {}
default_conf = {'commitlog_sync_period_in_ms': 1000}
set_conf = dict(default_conf, **configuration)
logger.debug('setting commitlog configuration with the following values: '
'{set_conf} and the following kwargs: {kwargs}'.format(
set_conf=set_conf, kwargs=kwargs))
self.cluster.set_configuration_options(values=set_conf, **kwargs)
self.cluster.start()
self.session1 = self.patient_cql_connection(self.node1)
if create_test_keyspace:
self.session1.execute("DROP KEYSPACE IF EXISTS ks;")
create_ks(self.session1, 'ks', 1)
self.session1.execute("DROP TABLE IF EXISTS test;")
query = """
CREATE TABLE test (
key int primary key,
col1 int
)
"""
self.session1.execute(query)
def _change_commitlog_perms(self, mod):
for path in self._get_commitlog_paths():
logger.debug('changing permissions to {perms} on {path}'.format(perms=oct(mod), path=path))
os.chmod(path, mod)
commitlogs = glob.glob(path + '/*')
if commitlogs:
logger.debug('changing permissions to {perms} on the following files:'
'\n {files}'.format(perms=oct(mod), files='\n '.join(commitlogs)))
else:
logger.debug(self._change_commitlog_perms.__name__ + ' called on empty commitlog directory '
'{path} with permissions {perms}'.format(path=path, perms=oct(mod)))
for commitlog in commitlogs:
os.chmod(commitlog, mod)
def _get_commitlog_paths(self):
"""
Returns the list of commitlog and cdc paths
"""
# TODO: this does not account for non-default commitlog/cdc paths
# specified in cassandra.yaml
return [d for d in [os.path.join(self.node1.get_path(), 'commitlogs'),
os.path.join(self.node1.get_path(), 'cdc')]
if os.path.isdir(d)]
def _get_commitlog_files(self):
"""
Returns the paths to commitlog files
"""
return [os.path.join(path, filename)
for path in self._get_commitlog_paths()
for filename in os.listdir(path)]
def _segment_size_test(self, segment_size_in_mb, compressed=False):
"""
Execute a basic commitlog test and validate the commitlog files
"""
conf = {'commitlog_segment_size_in_mb': segment_size_in_mb}
if compressed:
conf['commitlog_compression'] = [{'class_name': 'LZ4Compressor'}]
conf['memtable_heap_space_in_mb'] = 512
self.prepare(configuration=conf, create_test_keyspace=False)
segment_size = segment_size_in_mb * 1024 * 1024
self.node1.stress(['write', 'n=150k', 'no-warmup', '-rate', 'threads=25'])
time.sleep(1)
commitlogs = self._get_commitlog_files()
assert len(commitlogs) > 0, 'No commit log files were created'
# the most recently-written segment of the commitlog may be smaller
# than the expected size, so we allow exactly one segment to be smaller
smaller_found = False
for i, f in enumerate(commitlogs):
size = os.path.getsize(f)
size_in_mb = int(size / 1024 / 1024)
logger.debug('segment file {} {}; smaller already found: {}'.format(f, size_in_mb, smaller_found))
if size_in_mb < 1 or size < (segment_size * 0.1):
logger.debug('segment file not yet used; moving to next file')
continue # commitlog not yet used
try:
if compressed:
# if compression is used, we assume there will be at most a 50% compression ratio
assert size < segment_size
assert size > segment_size / 2
else:
# if no compression is used, the size will be close to what we expect
assert_almost_equal(size, segment_size, error=0.05)
except AssertionError as e:
# the last segment may be smaller
if not smaller_found:
assert size <= segment_size
smaller_found = True
else:
raise e
def _provoke_commitlog_failure(self):
"""
Provoke the commitlog failure
"""
logger.debug('Provoking commitlog failure')
# Test things are ok at this point
self.session1.execute("""
INSERT INTO test (key, col1) VALUES (1, 1);
""")
assert_one(
self.session1,
"SELECT * FROM test where key=1;",
[1, 1]
)
self._change_commitlog_perms(0)
# Use stress_process to skip internal error handling in ccm. Grep node logs for specific errors in test method.
self.node1.stress_process(['write', 'n=1M', 'no-warmup', '-col', 'size=FIXED(1000)', '-rate', 'threads=25']).communicate()
@since('3.0.7')
def test_mv_lock_contention_during_replay(self):
"""
Ensure that we don't generate WriteTimeoutExceptions during commitlog
replay due to MV lock contention. Fixed in 3.0.7 and 3.7.
@jira_ticket CASSANDRA-11891
"""
cluster_ver = self.cluster.version()
if LooseVersion('3.1') <= cluster_ver < LooseVersion('3.7'):
pytest.skip("Fixed in 3.0.7 and 3.7")
node1 = self.node1
node1.set_batch_commitlog(enabled=True)
node1.start()
session = self.patient_cql_connection(node1)
logger.debug("Creating schema")
create_ks(session, 'Test', 1)
session.execute("""
CREATE TABLE mytable (
a int,
b int,
c int,
PRIMARY KEY (a, b)
);
""")
session.execute("""
CREATE MATERIALIZED VIEW myview
AS SELECT * FROM mytable
WHERE a IS NOT NULL AND b IS NOT NULL
PRIMARY KEY (a, b);
""")
logger.debug("Insert data")
num_rows = 1024 # maximum number of mutations replayed at once by the commit log
for i in range(num_rows):
session.execute("INSERT INTO Test.mytable (a, b, c) VALUES (0, {i}, {i})".format(i=i))
node1.stop(gently=False)
node1.mark_log_for_errors()
logger.debug("Verify commitlog was written before abrupt stop")
commitlog_files = os.listdir(os.path.join(node1.get_path(), 'commitlogs'))
assert [] != commitlog_files
# set a short timeout to ensure lock contention will generally exceed this
node1.set_configuration_options({'write_request_timeout_in_ms': 30})
logger.debug("Starting node again")
node1.start()
logger.debug("Verify commit log was replayed on startup")
start_time, replay_complete = time.time(), False
while not replay_complete:
matches = node1.grep_log(r".*WriteTimeoutException.*")
assert [] == matches
replay_complete = node1.grep_log("Log replay complete")
assert time.time() - start_time < 120, "Did not finish commitlog replay within 120 seconds"
logger.debug("Reconnecting to node")
session = self.patient_cql_connection(node1)
logger.debug("Make query to ensure data is present")
res = list(session.execute("SELECT * FROM Test.mytable"))
assert num_rows == len(res), res
def test_commitlog_replay_on_startup(self):
"""
Test commit log replay
"""
node1 = self.node1
node1.set_batch_commitlog(enabled=True)
node1.start()
logger.debug("Insert data")
session = self.patient_cql_connection(node1)
create_ks(session, 'Test', 1)
session.execute("""
CREATE TABLE users (
user_name varchar PRIMARY KEY,
password varchar,
gender varchar,
state varchar,
birth_year bigint
);
""")
session.execute("INSERT INTO Test. users (user_name, password, gender, state, birth_year) "
"VALUES('gandalf', 'p@$$', 'male', 'WA', 1955);")
logger.debug("Verify data is present")
session = self.patient_cql_connection(node1)
res = session.execute("SELECT * FROM Test. users")
assert rows_to_list(res) == [['gandalf', 1955, 'male', 'p@$$', 'WA']]
logger.debug("Stop node abruptly")
node1.stop(gently=False)
logger.debug("Verify commitlog was written before abrupt stop")
commitlog_dir = os.path.join(node1.get_path(), 'commitlogs')
commitlog_files = os.listdir(commitlog_dir)
assert len(commitlog_files) > 0
logger.debug("Verify no SSTables were flushed before abrupt stop")
assert 0 == len(node1.get_sstables('test', 'users'))
logger.debug("Verify commit log was replayed on startup")
node1.start()
node1.watch_log_for("Log replay complete")
# Here we verify from the logs that some mutations were replayed
replays = [match_tuple[0] for match_tuple in node1.grep_log(r" \d+ replayed mutations")]
logger.debug('The following log lines indicate that mutations were replayed: {msgs}'.format(msgs=replays))
num_replayed_mutations = [
parse('{} {num_mutations:d} replayed mutations{}', line).named['num_mutations']
for line in replays
]
# assert there were some lines where more than zero mutations were replayed
assert [m for m in num_replayed_mutations if m > 0] != []
logger.debug("Make query and ensure data is present")
session = self.patient_cql_connection(node1)
res = session.execute("SELECT * FROM Test. users")
assert_lists_equal_ignoring_order(rows_to_list(res), [['gandalf', 1955, 'male', 'p@$$', 'WA']])
def test_default_segment_size(self):
"""
Test default commitlog_segment_size_in_mb (32MB)
"""
self._segment_size_test(32)
def test_small_segment_size(self):
"""
Test a small commitlog_segment_size_in_mb (5MB)
"""
self._segment_size_test(5)
@since('2.2')
def test_default_compressed_segment_size(self):
"""
Test default compressed commitlog_segment_size_in_mb (32MB)
"""
self._segment_size_test(32, compressed=True)
@since('2.2')
def test_small_compressed_segment_size(self):
"""
Test a small compressed commitlog_segment_size_in_mb (5MB)
"""
self._segment_size_test(5, compressed=True)
def test_stop_failure_policy(self):
"""
Test the stop commitlog failure policy (default one)
"""
self.prepare()
self._provoke_commitlog_failure()
failure = self.node1.grep_log("Failed .+ commit log segments. Commit disk failure policy is stop; terminating thread")
logger.debug(failure)
assert failure, "Cannot find the commitlog failure message in logs"
assert self.node1.is_running(), "Node1 should still be running"
# Cannot write anymore after the failure
with pytest.raises(NoHostAvailable):
self.session1.execute("""
INSERT INTO test (key, col1) VALUES (2, 2);
""")
# Should not be able to read either
with pytest.raises(NoHostAvailable):
self.session1.execute("""
"SELECT * FROM test;"
""")
def test_stop_commit_failure_policy(self):
"""
Test the stop_commit commitlog failure policy
"""
self.prepare(configuration={
'commit_failure_policy': 'stop_commit'
})
self.session1.execute("""
INSERT INTO test (key, col1) VALUES (2, 2);
""")
self._provoke_commitlog_failure()
failure = self.node1.grep_log("Failed .+ commit log segments. Commit disk failure policy is stop_commit; terminating thread")
logger.debug(failure)
assert failure, "Cannot find the commitlog failure message in logs"
assert self.node1.is_running(), "Node1 should still be running"
# Cannot write anymore after the failure
logger.debug('attempting to insert to node with failing commitlog; should fail')
with pytest.raises((OperationTimedOut, WriteTimeout)):
self.session1.execute("""
INSERT INTO test (key, col1) VALUES (2, 2);
""")
# Should be able to read
logger.debug('attempting to read from node with failing commitlog; should succeed')
assert_one(
self.session1,
"SELECT * FROM test where key=2;",
[2, 2]
)
def test_die_failure_policy(self):
"""
Test the die commitlog failure policy
"""
self.prepare(configuration={
'commit_failure_policy': 'die'
})
self._provoke_commitlog_failure()
failure = self.node1.grep_log(r"ERROR \[COMMIT-LOG-ALLOCATOR\].+JVM state determined to be unstable. Exiting forcefully")
logger.debug(failure)
assert failure, "Cannot find the commitlog failure message in logs"
assert not self.node1.is_running(), "Node1 should not be running"
def test_ignore_failure_policy(self):
"""
Test the ignore commitlog failure policy
"""
self.prepare(configuration={
'commit_failure_policy': 'ignore'
})
self._provoke_commitlog_failure()
failure = self.node1.grep_log(r"ERROR \[COMMIT-LOG-ALLOCATOR\].+Failed .+ commit log segments")
assert failure, "Cannot find the commitlog failure message in logs"
assert self.node1.is_running(), "Node1 should still be running"
# on Windows, we can't delete the segments if they're chmod to 0 so they'll still be available for use by CLSM,
# and we can still create new segments since os.chmod is limited to stat.S_IWRITE and stat.S_IREAD to set files
# as read-only. New mutations will still be allocated and WriteTimeouts will not be raised. It's sufficient that
# we confirm that a) the node isn't dead (stop) and b) the node doesn't terminate the thread (stop_commit)
query = "INSERT INTO test (key, col1) VALUES (2, 2);"
if is_win():
# We expect this to succeed
self.session1.execute(query)
assert not self.node1.grep_log("terminating thread"), "thread was terminated but CL error should have been ignored."
assert self.node1.is_running(), "Node1 should still be running after an ignore error on CL"
else:
with pytest.raises((OperationTimedOut, WriteTimeout)):
self.session1.execute(query)
# Should not exist
assert_none(self.session1, "SELECT * FROM test where key=2;")
# bring back the node commitlogs
self._change_commitlog_perms(stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
self.session1.execute("""
INSERT INTO test (key, col1) VALUES (3, 3);
""")
assert_one(
self.session1,
"SELECT * FROM test where key=3;",
[3, 3]
)
time.sleep(2)
assert_one(
self.session1,
"SELECT * FROM test where key=2;",
[2, 2]
)
@since('2.2')
def test_bad_crc(self):
"""
if the commit log header crc (checksum) doesn't match the actual crc of the header data,
and the commit_failure_policy is stop, C* shouldn't startup
@jira_ticket CASSANDRA-9749
"""
expected_error = "Exiting due to error while processing commit log during initialization."
self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
expected_error]
node = self.node1
assert isinstance(node, Node)
node.set_configuration_options({'commit_failure_policy': 'stop', 'commitlog_sync_period_in_ms': 1000})
self.cluster.start()
cursor = self.patient_cql_connection(self.cluster.nodelist()[0])
create_ks(cursor, 'ks', 1)
cursor.execute("CREATE TABLE ks.tbl (k INT PRIMARY KEY, v INT)")
for i in range(10):
cursor.execute("INSERT INTO ks.tbl (k, v) VALUES ({0}, {0})".format(i))
results = list(cursor.execute("SELECT * FROM ks.tbl"))
assert len(results) == 10
# with the commitlog_sync_period_in_ms set to 1000,
# this sleep guarantees that the commitlog data is
# actually flushed to disk before we kill -9 it
time.sleep(1)
node.stop(gently=False)
# check that ks.tbl hasn't been flushed
path = node.get_path()
for data_dir in node.data_directories():
ks_dir = os.path.join(data_dir, 'ks')
db_dir = os.listdir(ks_dir)[0]
sstables = len([f for f in os.listdir(os.path.join(ks_dir, db_dir)) if f.endswith('.db')])
assert sstables == 0
# modify the commit log crc values
cl_dir = os.path.join(path, 'commitlogs')
assert len(os.listdir(cl_dir)) > 0
for cl in os.listdir(cl_dir):
# locate the CRC location
with open(os.path.join(cl_dir, cl), 'rb') as f:
f.seek(0)
version = struct.unpack('>i', f.read(4))[0]
crc_pos = 12
if version >= 5:
f.seek(crc_pos)
psize = struct.unpack('>h', f.read(2))[0] & 0xFFFF
crc_pos += 2 + psize
# rewrite it with crap
with open(os.path.join(cl_dir, cl), 'wb') as f:
f.seek(crc_pos)
f.write(struct.pack('>i', 123456))
# verify said crap
with open(os.path.join(cl_dir, cl), 'rb') as f:
f.seek(crc_pos)
crc = struct.unpack('>i', f.read(4))[0]
assert crc == 123456
mark = node.mark_log()
node.start()
node.watch_log_for(expected_error, from_mark=mark)
with pytest.raises(TimeoutError):
node.wait_for_binary_interface(from_mark=mark, timeout=20)
assert not node.is_running()
@since('2.2')
def test_compression_error(self):
"""
@jira_ticket CASSANDRA-7886
if the commit log header refers to an unknown compression class, and
the commit_failure_policy is stop, C* shouldn't start up
"""
expected_error = 'Could not create Compression for type org.apache.cassandra.io.compress.LZ5Compressor'
self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
expected_error]
node = self.node1
assert isinstance(node, Node)
node.set_configuration_options({'commit_failure_policy': 'stop',
'commitlog_compression': [{'class_name': 'LZ4Compressor'}],
'commitlog_sync_period_in_ms': 1000})
self.cluster.start()
cursor = self.patient_cql_connection(self.cluster.nodelist()[0])
create_ks(cursor, 'ks1', 1)
cursor.execute("CREATE TABLE ks1.tbl (k INT PRIMARY KEY, v INT)")
for i in range(10):
cursor.execute("INSERT INTO ks1.tbl (k, v) VALUES ({0}, {0})".format(i))
results = list(cursor.execute("SELECT * FROM ks1.tbl"))
assert len(results) == 10
# with the commitlog_sync_period_in_ms set to 1000,
# this sleep guarantees that the commitlog data is
# actually flushed to disk before we kill -9 it
time.sleep(1)
node.stop(gently=False)
# check that ks1.tbl hasn't been flushed
sstables = 0
for data_dir in node.data_directories():
ks_dir = os.path.join(data_dir, 'ks1')
db_dir = os.listdir(ks_dir)[0]
sstables = sstables + len([f for f in os.listdir(os.path.join(ks_dir, db_dir)) if f.endswith('.db')])
assert sstables == 0
def get_header_crc(header):
"""
When calculating the header crc, C* splits up the 8b id, first adding the 4 least significant
bytes to the crc, then the 4 most significant bytes, so this splits them and calculates the same way
"""
new_header = bytearray(header[:4])
# C* evaluates most and least significant 4 bytes out of order
new_header.extend(header[8:12])
new_header.extend(header[4:8])
# C* evaluates the short parameter length as an int
new_header.extend(b'\x00\x00')
new_header.extend(header[12:14]) # the 2-byte parameter length itself, padded to an int by the zeros above
new_header.extend(header[14:])
# https://docs.python.org/2/library/binascii.html
# "Changed in version 2.6: The return value is in the range [-2**31, 2**31-1] regardless
# of platform. In the past the value would be signed on some platforms and unsigned on
# others. Use & 0xffffffff on the value if you want it to match Python 3 behavior."
return binascii.crc32(new_header) & 0xffffffff
# modify the compression parameters to look for a compressor that isn't there
# while this scenario is pretty unlikely, if a jar or lib got moved or something,
# you'd have a similar situation, which would be fixable by the user
path = node.get_path()
cl_dir = os.path.join(path, 'commitlogs')
assert len(os.listdir(cl_dir)) > 0
for cl in os.listdir(cl_dir):
# read the header and find the crc location
with open(os.path.join(cl_dir, cl), 'rb') as f:
f.seek(0)
crc_pos = 12
f.seek(crc_pos)
psize = struct.unpack('>h', f.read(2))[0] & 0xFFFF
crc_pos += 2 + psize
header_length = crc_pos
f.seek(crc_pos)
crc = struct.unpack('>i', f.read(4))[0]
# check that we're doing this right
f.seek(0)
header_bytes = f.read(header_length)
# https://docs.python.org/2/library/binascii.html
# "Changed in version 2.6: The return value is in the range [-2**31, 2**31-1] regardless
# of platform. In the past the value would be signed on some platforms and unsigned on
# others. Use & 0xffffffff on the value if you want it to match Python 3 behavior."
assert get_header_crc(header_bytes) == (crc & 0xffffffff)
# rewrite it with imaginary compressor
assert 'LZ4Compressor'.encode("ascii") in header_bytes
header_bytes = header_bytes.replace('LZ4Compressor'.encode("ascii"), 'LZ5Compressor'.encode("ascii"))
assert 'LZ4Compressor'.encode("ascii") not in header_bytes
assert 'LZ5Compressor'.encode("ascii") in header_bytes
with open(os.path.join(cl_dir, cl), 'wb') as f:
f.seek(0)
f.write(header_bytes)
f.seek(crc_pos)
f.write(struct.pack('>I', get_header_crc(header_bytes)))
# verify we wrote everything correctly
with open(os.path.join(cl_dir, cl), 'rb') as f:
f.seek(0)
assert f.read(header_length) == header_bytes
f.seek(crc_pos)
crc = struct.unpack('>i', f.read(4))[0]
# https://docs.python.org/2/library/binascii.html
# "Changed in version 2.6: The return value is in the range [-2**31, 2**31-1] regardless
# of platform. In the past the value would be signed on some platforms and unsigned on
# others. Use & 0xffffffff on the value if you want it to match Python 3 behavior."
assert (crc & 0xffffffff) == get_header_crc(header_bytes)
mark = node.mark_log()
node.start()
node.watch_log_for(expected_error, from_mark=mark)
with pytest.raises(TimeoutError):
node.wait_for_binary_interface(from_mark=mark, timeout=20)
|
stef1927/cassandra-dtest
|
commitlog_test.py
|
Python
|
apache-2.0
| 26,153
|
from typing import Any, Optional
import sqlalchemy
from django.db import connection
from zerver.lib.db import TimeTrackingConnection
# This is a Pool that doesn't close connections. Therefore it can be used with
# existing Django database connections.
class NonClosingPool(sqlalchemy.pool.NullPool):
def status(self) -> str:
return "NonClosingPool"
def _do_return_conn(self, conn: sqlalchemy.engine.base.Connection) -> None:
pass
def recreate(self) -> 'NonClosingPool':
return self.__class__(creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch)
sqlalchemy_engine: Optional[Any] = None
def get_sqlalchemy_connection() -> sqlalchemy.engine.base.Connection:
global sqlalchemy_engine
if sqlalchemy_engine is None:
def get_dj_conn() -> TimeTrackingConnection:
connection.ensure_connection()
return connection.connection
sqlalchemy_engine = sqlalchemy.create_engine('postgresql://',
creator=get_dj_conn,
poolclass=NonClosingPool,
pool_reset_on_return=False)
sa_connection = sqlalchemy_engine.connect()
sa_connection.execution_options(autocommit=False)
return sa_connection
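# A minimal usage sketch (not part of the upstream module), assuming Django settings
# are already configured: get_sqlalchemy_connection() reuses Django's live database
# connection through NonClosingPool rather than opening a new one.
if __name__ == "__main__":
    sa_conn = get_sqlalchemy_connection()
    # Any SQLAlchemy Core construct runs over the shared Django connection.
    print(sa_conn.execute(sqlalchemy.text("SELECT 1")).scalar())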
|
timabbott/zulip
|
zerver/lib/sqlalchemy_utils.py
|
Python
|
apache-2.0
| 1,660
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
class Base(Exception):
error_code = 500
error_type = None
error_message = None
errors = None
def __init__(self, *args, **kwargs):
self.errors = kwargs.pop('errors', None)
self.object = kwargs.pop('object', None)
super(Base, self).__init__(*args, **kwargs)
if len(args) > 0 and isinstance(args[0], six.string_types):
self.error_message = args[0]
class Backend(Exception):
pass
class RelationNotLoaded(Base):
error_code = 500
error_type = 'relation_not_loaded'
def __init__(self, *args, **kwargs):
self.relation = kwargs.pop('relation', None)
super(RelationNotLoaded, self).__init__(*args, **kwargs)
self.error_message = "%(relation)s is not loaded on %(object)s" % \
{"relation": self.relation, "object": self.object.obj_name()}
def __str__(self):
return self.error_message
class AdapterNotFound(Base):
error_code = 500
error_type = 'adapter_not_found'
class NSD4SlaveBackendError(Backend):
pass
class NotImplemented(Base, NotImplementedError):
pass
class XFRFailure(Base):
pass
class ConfigurationError(Base):
error_type = 'configuration_error'
class UnknownFailure(Base):
error_code = 500
error_type = 'unknown_failure'
class CommunicationFailure(Base):
error_code = 504
error_type = 'communication_failure'
class NeutronCommunicationFailure(CommunicationFailure):
"""
Raised in case one of the Neutron endpoints fails.
"""
error_type = 'neutron_communication_failure'
class NoServersConfigured(ConfigurationError):
error_code = 500
error_type = 'no_servers_configured'
class NoPoolTargetsConfigured(ConfigurationError):
error_code = 500
error_type = 'no_pool_targets_configured'
class OverQuota(Base):
error_code = 413
error_type = 'over_quota'
class QuotaResourceUnknown(Base):
error_type = 'quota_resource_unknown'
class InvalidObject(Base):
error_code = 400
error_type = 'invalid_object'
expected = True
class BadRequest(Base):
error_code = 400
error_type = 'bad_request'
expected = True
class EmptyRequestBody(BadRequest):
error_type = 'empty_request_body'
expected = True
class InvalidUUID(BadRequest):
error_type = 'invalid_uuid'
class NetworkEndpointNotFound(BadRequest):
error_type = 'no_endpoint'
error_code = 403
class MarkerNotFound(BadRequest):
error_type = 'marker_not_found'
class ValueError(BadRequest):
error_type = 'value_error'
class InvalidMarker(BadRequest):
error_type = 'invalid_marker'
class InvalidSortDir(BadRequest):
error_type = 'invalid_sort_dir'
class InvalidLimit(BadRequest):
error_type = 'invalid_limit'
class InvalidSortKey(BadRequest):
error_type = 'invalid_sort_key'
class InvalidJson(BadRequest):
error_type = 'invalid_json'
class InvalidOperation(BadRequest):
error_code = 400
error_type = 'invalid_operation'
class UnsupportedAccept(BadRequest):
error_code = 406
error_type = 'unsupported_accept'
class UnsupportedContentType(BadRequest):
error_code = 415
error_type = 'unsupported_content_type'
class InvalidDomainName(Base):
error_code = 400
error_type = 'invalid_domain_name'
expected = True
class InvalidRecordSetName(Base):
error_code = 400
error_type = 'invalid_recordset_name'
class InvalidRecordSetLocation(Base):
error_code = 400
error_type = 'invalid_recordset_location'
class InvaildZoneTransfer(Base):
error_code = 400
error_type = 'invalid_zone_transfer_request'
class InvalidTTL(Base):
error_code = 400
error_type = 'invalid_ttl'
class DomainHasSubdomain(Base):
error_code = 400
error_type = 'domain_has_subdomain'
class Forbidden(Base):
error_code = 403
error_type = 'forbidden'
expected = True
class IllegalChildDomain(Forbidden):
error_type = 'illegal_child'
class IllegalParentDomain(Forbidden):
error_type = 'illegal_parent'
class IncorrectZoneTransferKey(Forbidden):
error_type = 'invalid_key'
class Duplicate(Base):
expected = True
error_code = 409
error_type = 'duplicate'
class DuplicateQuota(Duplicate):
error_type = 'duplicate_quota'
class DuplicateServer(Duplicate):
error_type = 'duplicate_server'
class DuplicateTsigKey(Duplicate):
error_type = 'duplicate_tsigkey'
class DuplicateDomain(Duplicate):
error_type = 'duplicate_domain'
class DuplicateTld(Duplicate):
error_type = 'duplicate_tld'
class DuplicateRecordSet(Duplicate):
error_type = 'duplicate_recordset'
class DuplicateRecord(Duplicate):
error_type = 'duplicate_record'
class DuplicateBlacklist(Duplicate):
error_type = 'duplicate_blacklist'
class DuplicatePoolManagerStatus(Duplicate):
error_type = 'duplication_pool_manager_status'
class DuplicatePool(Duplicate):
error_type = 'duplicate_pool'
class DuplicatePoolAttribute(Duplicate):
error_type = 'duplicate_pool_attribute'
class DuplicateDomainAttribute(Duplicate):
error_type = 'duplicate_domain_attribute'
class DuplicatePoolNsRecord(Duplicate):
error_type = 'duplicate_pool_ns_record'
class DuplicateZoneImport(Duplicate):
error_type = 'duplicate_zone_import'
class DuplicateZoneExport(Duplicate):
error_type = 'duplicate_zone_export'
class MethodNotAllowed(Base):
expected = True
error_code = 405
error_type = 'method_not_allowed'
class DuplicateZoneTransferRequest(Duplicate):
error_type = 'duplicate_zone_transfer_request'
class DuplicateZoneTransferAccept(Duplicate):
error_type = 'duplicate_zone_transfer_accept'
class NotFound(Base):
expected = True
error_code = 404
error_type = 'not_found'
class QuotaNotFound(NotFound):
error_type = 'quota_not_found'
class ServerNotFound(NotFound):
error_type = 'server_not_found'
class TsigKeyNotFound(NotFound):
error_type = 'tsigkey_not_found'
class BlacklistNotFound(NotFound):
error_type = 'blacklist_not_found'
class DomainNotFound(NotFound):
error_type = 'domain_not_found'
class DomainMasterNotFound(NotFound):
error_type = 'domain_master_not_found'
class DomainAttributeNotFound(NotFound):
error_type = 'domain_attribute_not_found'
class TldNotFound(NotFound):
error_type = 'tld_not_found'
class RecordSetNotFound(NotFound):
error_type = 'recordset_not_found'
class RecordNotFound(NotFound):
error_type = 'record_not_found'
class ReportNotFound(NotFound):
error_type = 'report_not_found'
class PoolManagerStatusNotFound(NotFound):
error_type = 'pool_manager_status_not_found'
class PoolNotFound(NotFound):
error_type = 'pool_not_found'
class PoolAttributeNotFound(NotFound):
error_type = 'pool_attribute_not_found'
class PoolNsRecordNotFound(NotFound):
error_type = 'pool_ns_record_not_found'
class ZoneTransferRequestNotFound(NotFound):
error_type = 'zone_transfer_request_not_found'
class ZoneTransferAcceptNotFound(NotFound):
error_type = 'zone_transfer_accept_not_found'
class ZoneImportNotFound(NotFound):
error_type = 'zone_import_not_found'
class ZoneExportNotFound(NotFound):
error_type = 'zone_export_not_found'
class LastServerDeleteNotAllowed(BadRequest):
error_type = 'last_server_delete_not_allowed'
class ResourceNotFound(NotFound):
# TODO(kiall): Should this be extending NotFound??
pass
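# Hedged illustration (not part of the upstream module): every exception above carries
# an HTTP-style error_code and a machine-readable error_type, and Base.__init__ keeps
# the first positional string argument as error_message.
if __name__ == '__main__':
    try:
        raise DomainNotFound('domain example.org. could not be found')
    except Base as exc:
        # -> 404 domain_not_found domain example.org. could not be found
        print(exc.error_code, exc.error_type, exc.error_message)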
|
cneill/designate-testing
|
designate/exceptions.py
|
Python
|
apache-2.0
| 8,130
|
from .utils import DslBase, BoolMixin, _make_dsl_class
from .function import SF, ScoreFunction
__all__ = [
'Q', 'Bool', 'Boosting', 'Common', 'ConstantScore', 'DisMax', 'Filtered',
'FunctionScore', 'Fuzzy', 'FuzzyLikeThis', 'FuzzyLikeThisField',
'GeoShape', 'HasChild', 'HasParent', 'Ids', 'Indices', 'Match', 'MatchAll',
'MatchPhrase', 'MatchPhrasePrefix', 'MoreLikeThis', 'MoreLikeThisField',
'MultiMatch', 'Nested', 'Prefix', 'Query', 'QueryString', 'Range',
'Regexp', 'SF', 'ScoreFunction', 'SimpleQueryString', 'SpanFirst',
'SpanMulti', 'SpanNear', 'SpanNot', 'SpanOr', 'SpanTerm', 'Template',
'Term', 'Terms', 'TopChildren', 'Wildcard'
]
def Q(name_or_query='match_all', **params):
# {"match": {"title": "python"}}
if isinstance(name_or_query, dict):
if params:
raise ValueError('Q() cannot accept parameters when passing in a dict.')
if len(name_or_query) != 1:
raise ValueError('Q() can only accept dict with a single query ({"match": {...}}). '
'Instead it got (%r)' % name_or_query)
name, params = name_or_query.copy().popitem()
return Query.get_dsl_class(name)(**params)
# MatchAll()
if isinstance(name_or_query, Query):
if params:
raise ValueError('Q() cannot accept parameters when passing in a Query object.')
return name_or_query
# s.query = Q('filtered', query=s.query)
if hasattr(name_or_query, '_proxied'):
return name_or_query._proxied
# "match", title="python"
return Query.get_dsl_class(name_or_query)(**params)
class Query(DslBase):
_type_name = 'query'
_type_shortcut = staticmethod(Q)
name = None
class MatchAll(Query):
name = 'match_all'
def __add__(self, other):
return other._clone()
__and__ = __rand__ = __radd__ = __add__
def __or__(self, other):
return self
__ror__ = __or__
EMPTY_QUERY = MatchAll()
class Bool(BoolMixin, Query):
name = 'bool'
_param_defs = {
'must': {'type': 'query', 'multi': True},
'should': {'type': 'query', 'multi': True},
'must_not': {'type': 'query', 'multi': True},
}
def __and__(self, other):
q = self._clone()
if isinstance(other, self.__class__):
q.must += other.must
q.must_not += other.must_not
q.should = []
for qx in (self, other):
min_should_match = getattr(qx, 'minimum_should_match', 0 if any((qx.must, qx.must_not)) else 1)
# all subqueries are required
if len(qx.should) <= min_should_match:
q.must.extend(qx.should)
# not all of them are required, use it and remember min_should_match
elif not q.should:
q.minimum_should_match = min_should_match
q.should = qx.should
# not all are required, add a should list to the must with proper min_should_match
else:
q.must.append(Bool(should=qx.should, minimum_should_match=min_should_match))
else:
q.must.append(other)
return q
__rand__ = __and__
# register this as Bool for Query
Query._bool = Bool
class FunctionScore(Query):
name = 'function_score'
_param_defs = {
'query': {'type': 'query'},
'filter': {'type': 'filter'},
'functions': {'type': 'score_function', 'multi': True},
}
def __init__(self, **kwargs):
if 'functions' not in kwargs:
fns = kwargs['functions'] = []
for name in ScoreFunction._classes:
if name in kwargs:
fns.append({name: kwargs.pop(name)})
super(FunctionScore, self).__init__(**kwargs)
QUERIES = (
# compound queries
('boosting', {'positive': {'type': 'query'}, 'negative': {'type': 'query'}}),
('constant_score', {'query': {'type': 'query'}, 'filter': {'type': 'filter'}}),
('dis_max', {'queries': {'type': 'query', 'multi': True}}),
('filtered', {'query': {'type': 'query'}, 'filter': {'type': 'filter'}}),
('indices', {'query': {'type': 'query'}, 'no_match_query': {'type': 'query'}}),
# relationship queries
('nested', {'query': {'type': 'query'}}),
('has_child', {'query': {'type': 'query'}}),
('has_parent', {'query': {'type': 'query'}}),
('top_children', {'query': {'type': 'query'}}),
# compound span queries
('span_first', {'match': {'type': 'query'}}),
('span_multi', {'match': {'type': 'query'}}),
('span_near', {'clauses': {'type': 'query', 'multi': True}}),
('span_not', {'exclude': {'type': 'query'}, 'include': {'type': 'query'}}),
('span_or', {'clauses': {'type': 'query', 'multi': True}}),
# core queries
('common', None),
('fuzzy', None),
('fuzzy_like_this', None),
('fuzzy_like_this_field', None),
('geo_shape', None),
('ids', None),
('match', None),
('match_phrase', None),
('match_phrase_prefix', None),
('more_like_this', None),
('more_like_this_field', None),
('multi_match', None),
('prefix', None),
('query_string', None),
('range', None),
('regexp', None),
('simple_query_string', None),
('span_term', None),
('template', None),
('term', None),
('terms', None),
('wildcard', None),
)
# generate the query classes dynamically
for qname, params_def in QUERIES:
qclass = _make_dsl_class(Query, qname, params_def)
globals()[qclass.__name__] = qclass
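# Hedged usage sketch (not part of the upstream module), exercising the calling
# conventions handled by Q() above; the field names used here are illustrative only.
if __name__ == '__main__':
    q1 = Q('match', title='python')            # query name plus keyword parameters
    q2 = Q({'match': {'title': 'python'}})     # raw dict containing a single query
    q3 = Q(MatchAll())                         # Query instances pass through unchanged
    print(q1.to_dict(), q2.to_dict(), q3.to_dict())
    # Bool queries can also be built directly; Bool.__and__ above decides how two
    # Bool queries merge their must/should lists and minimum_should_match.
    print(Bool(must=[q1], should=[Q('match', body='dsl')]).to_dict())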
|
reflection/elasticsearch-dsl-py
|
elasticsearch_dsl/query.py
|
Python
|
apache-2.0
| 5,587
|
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class ScopedEnumType(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipIf(dwarf_version=['<', '4'])
def test(self):
self.build()
self.main_source = "main.cpp"
self.main_source_spec = lldb.SBFileSpec(self.main_source)
(target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
'// Set break point at this line.', self.main_source_spec)
frame = thread.GetFrameAtIndex(0)
self.expect("expr f == Foo::FooBar",
substrs=['(bool) $0 = true'])
value = frame.EvaluateExpression("f == Foo::FooBar")
self.assertTrue(value.IsValid())
self.assertTrue(value.GetError().Success())
self.assertEqual(value.GetValueAsUnsigned(), 1)
value = frame.EvaluateExpression("b == BarBar")
self.assertTrue(value.IsValid())
self.assertTrue(value.GetError().Success())
self.assertEqual(value.GetValueAsUnsigned(), 1)
## b is not a Foo
value = frame.EvaluateExpression("b == Foo::FooBar")
self.assertTrue(value.IsValid())
self.assertFalse(value.GetError().Success())
## integral is not implicitly convertible to a scoped enum
value = frame.EvaluateExpression("1 == Foo::FooBar")
self.assertTrue(value.IsValid())
self.assertFalse(value.GetError().Success())
|
llvm-mirror/lldb
|
packages/Python/lldbsuite/test/commands/expression/scoped_enums/TestScopedEnumType.py
|
Python
|
apache-2.0
| 1,570
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import AssetServiceTransport
from .grpc import AssetServiceGrpcTransport
from .grpc_asyncio import AssetServiceGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[AssetServiceTransport]]
_transport_registry["grpc"] = AssetServiceGrpcTransport
_transport_registry["grpc_asyncio"] = AssetServiceGrpcAsyncIOTransport
__all__ = (
"AssetServiceTransport",
"AssetServiceGrpcTransport",
"AssetServiceGrpcAsyncIOTransport",
)
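# Hedged illustration (not part of the upstream module): the private registry above
# maps a transport name to its class so a caller can select one at runtime.
if __name__ == "__main__":
    print(_transport_registry["grpc"].__name__)           # AssetServiceGrpcTransport
    print(_transport_registry["grpc_asyncio"].__name__)   # AssetServiceGrpcAsyncIOTransport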
|
googleapis/python-asset
|
google/cloud/asset_v1p5beta1/services/asset_service/transports/__init__.py
|
Python
|
apache-2.0
| 1,176
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from keystone import middleware
from keystone import tests
class FakeApp(object):
"""Fakes a WSGI app URL normalized."""
def __call__(self, env, start_response):
resp = webob.Response()
resp.body = 'SUCCESS'
return resp(env, start_response)
class UrlMiddlewareTest(tests.TestCase):
def setUp(self):
self.middleware = middleware.NormalizingFilter(FakeApp())
self.response_status = None
self.response_headers = None
super(UrlMiddlewareTest, self).setUp()
def start_fake_response(self, status, headers):
self.response_status = int(status.split(' ', 1)[0])
self.response_headers = dict(headers)
def test_trailing_slash_normalization(self):
"""Tests /v2.0/tokens and /v2.0/tokens/ normalized URLs match."""
req1 = webob.Request.blank('/v2.0/tokens')
req2 = webob.Request.blank('/v2.0/tokens/')
self.middleware(req1.environ, self.start_fake_response)
self.middleware(req2.environ, self.start_fake_response)
self.assertEqual(req1.path_url, req2.path_url)
self.assertEqual(req1.path_url, 'http://localhost/v2.0/tokens')
def test_rewrite_empty_path(self):
"""Tests empty path is rewritten to root."""
req = webob.Request.blank('')
self.middleware(req.environ, self.start_fake_response)
self.assertEqual(req.path_url, 'http://localhost/')
|
atheendra/access_keys
|
keystone/tests/test_url_middleware.py
|
Python
|
apache-2.0
| 2,020
|
"""Define constants for the Ambient PWS component."""
DOMAIN = "ambient_station"
ATTR_LAST_DATA = "last_data"
CONF_APP_KEY = "app_key"
DATA_CLIENT = "data_client"
TOPIC_UPDATE = "update"
TYPE_BINARY_SENSOR = "binary_sensor"
TYPE_SENSOR = "sensor"
|
leppa/home-assistant
|
homeassistant/components/ambient_station/const.py
|
Python
|
apache-2.0
| 252
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_adcpt_acfgm
@fid marine-integrations/mi/dataset/parser/test/test_adcpt_acfgm.py
@author Ronald Ronquillo
@brief Test code for an Adcpt_Acfgm_Dcl data parser
"""
from nose.plugins.attrib import attr
import os
from mi.core.log import get_logger
from mi.dataset.parser.utilities import particle_to_yml
log = get_logger()
from mi.core.exceptions import RecoverableSampleException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.adcpt_acfgm.dcl.pd8.adcpt_acfgm_dcl_pd8_driver_common import \
AdcptAcfgmPd8Parser, MODULE_NAME, ADCPT_ACFGM_DCL_PD8_RECOVERED_PARTICLE_CLASS, \
ADCPT_ACFGM_DCL_PD8_TELEMETERED_PARTICLE_CLASS
from mi.dataset.driver.adcpt_acfgm.dcl.pd8.resource import RESOURCE_PATH
@attr('UNIT', group='mi')
class AdcptAcfgmPd8ParserUnitTestCase(ParserUnitTestCase):
"""
Adcpt_Acfgm_Dcl Parser unit test suite
"""
def create_parser(self, particle_class, file_handle):
"""
This function creates an AdcptAcfgmDcl parser for the given particle class.
"""
parser = AdcptAcfgmPd8Parser(
{DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: particle_class},
file_handle,
self.exception_callback)
return parser
def open_file(self, filename):
my_file = open(os.path.join(RESOURCE_PATH, filename), mode='rU')
return my_file
def setUp(self):
ParserUnitTestCase.setUp(self)
def create_yml(self, particles, filename):
particle_to_yml(particles, os.path.join(RESOURCE_PATH, filename))
def test_parse_input(self):
"""
Read a large file and verify that all expected particles can be read.
Verification is not done at this time, but will be done in the
tests below.
"""
in_file = self.open_file('20131201.adcp_mod.log')
parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_RECOVERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles in this file.
result = parser.get_records(23)
self.assertEqual(len(result), 23)
in_file.close()
self.assertListEqual(self.exception_callback_value, [])
def test_recov(self):
"""
Read a file and pull out multiple data particles at one time.
Verify that the results are those we expected.
"""
in_file = self.open_file('20131201.adcp_mod.log')
parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_RECOVERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles for this file.
result = parser.get_records(23)
self.assertEqual(len(result), 23)
self.assert_particles(result, '20131201.adcp_mod_recov.yml', RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
in_file.close()
def test_telem(self):
"""
Read a file and pull out multiple data particles at one time.
Verify that the results are those we expected.
"""
in_file = self.open_file('20131201.adcp_mod.log')
parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_TELEMETERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles for this file.
result = parser.get_records(23)
self.assertEqual(len(result), 23)
self.assert_particles(result, '20131201.adcp_mod.yml', RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
in_file.close()
def test_bad_data(self):
"""
Ensure that bad data is skipped when it exists.
"""
# Line 1: DCL Log missing opening square bracket
# Line 40: Timestamp day has a float
# Line 79: Heading is not a float
# Line 118: Temp is not a float
# Line 119: Header typo
# Line 158: Timestamp has non digit
# Line 197: Timestamp missing milliseconds
# Line 234: Bin missing
# Line 272: Dir missing
# Line 310: Mag missing
# Line 348: E/W missing
# Line 386: N/S missing
# Line 424: Vert missing
# Line 462: Err missing
# Line 500: Echo1 missing
# Line 538: Echo2 missing
# Line 576: Echo3 missing
# Line 614: Echo4 missing
# Line 652: Dir is not a float
# Line 690: Dir has a non digit
# Line 728: Mag is not a float
# Line 766: Mag has a non digit
# Line 804: E/W is a float
# Line 842: E/W has a non digit
# Line 880: N/S is a float
# Line 918: N/S is a non digit
# Line 956: Vert is a float
# Line 994: Vert is a non digit
# Line 1032: Err is a float
# Line 1070: Err has a non digit
# Line 1108: Echo1 is a float
# Line 1146: Echo1 has a non digit
# Line 1184: Echo2 is a float
# Line 1222: Echo2 has a non digit
# Line 1260: Echo3 is negative
# Line 1298: Timestamp missing seconds
# Line 1331: DCL Logging missing closing square bracket
# Line 1384: Ensemble number is a float
# Line 1409: Pitch is not a float
# Line 1448: Speed of sound is a float
# Line 1485: Roll is not a float
# Line 1523: Heading has a non digit
# Line 1561: Pitch has a non digit
# Line 1599: Roll has a non digit
fid = open(os.path.join(RESOURCE_PATH, '20131201.adcp_corrupt.log'), 'rb')
parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_RECOVERED_PARTICLE_CLASS, fid)
parser.get_records(66)
for i in range(len(self.exception_callback_value)):
self.assert_(isinstance(self.exception_callback_value[i], RecoverableSampleException))
log.debug('Exception: %s', self.exception_callback_value[i])
self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))
fid.close()
def test_telem_3021(self):
"""
Read a file and pull out multiple data particles at one time.
Verify that the results are those we expected.
This test uses a real file from a deployment.
Used to verify fixes made in response to Redmine #3021
"""
in_file = self.open_file('20141208.adcp.log')
parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_TELEMETERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles for this file.
result = parser.get_records(23)
self.assertEqual(len(result), 14)
self.assert_particles(result, '20141208.adcp.yml', RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
in_file.close()
def test_telem_9692(self):
"""
Test to verify change made to dcl_file_common.py works with DCL
timestamps containing seconds >59
"""
in_file = self.open_file('20131201.adcpA.log')
parser = self.create_parser(ADCPT_ACFGM_DCL_PD8_TELEMETERED_PARTICLE_CLASS, in_file)
# In a single read, get all particles for this file.
result = parser.get_records(20)
self.assertEqual(len(result), 1)
self.assertListEqual(self.exception_callback_value, [])
in_file.close()
|
janeen666/mi-instrument
|
mi/dataset/parser/test/test_adcpt_acfgm_dcl_pd8.py
|
Python
|
bsd-2-clause
| 7,347
|
# -*- coding: utf-8 -*-
"""
logbook.ticketing
~~~~~~~~~~~~~~~~~
Implements log handlers that write to remote data stores and assign
each logging message a ticket id.
:copyright: (c) 2010 by Armin Ronacher, Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
from time import time, sleep
import json
from logbook.base import NOTSET, level_name_property, LogRecord
from logbook.handlers import Handler, HashingHandlerMixin
from logbook.helpers import cached_property, b, PY2, u
class Ticket(object):
"""Represents a ticket from the database."""
level_name = level_name_property()
def __init__(self, db, row):
self.db = db
self.__dict__.update(row)
@cached_property
def last_occurrence(self):
"""The last occurrence."""
rv = self.get_occurrences(limit=1)
if rv:
return rv[0]
def get_occurrences(self, order_by='-time', limit=50, offset=0):
"""Returns the occurrences for this ticket."""
return self.db.get_occurrences(self.ticket_id, order_by, limit, offset)
def solve(self):
"""Marks this ticket as solved."""
self.db.solve_ticket(self.ticket_id)
self.solved = True
def delete(self):
"""Deletes the ticket from the database."""
self.db.delete_ticket(self.ticket_id)
# Silence DeprecationWarning
__hash__ = None
def __eq__(self, other):
equal = True
for key in self.__dict__.keys():
if getattr(self, key) != getattr(other, key):
equal = False
break
return equal
def __ne__(self, other):
return not self.__eq__(other)
class Occurrence(LogRecord):
"""Represents an occurrence of a ticket."""
def __init__(self, db, row):
self.update_from_dict(json.loads(row['data']))
self.db = db
self.time = row['time']
self.ticket_id = row['ticket_id']
self.occurrence_id = row['occurrence_id']
class BackendBase(object):
"""Provides an abstract interface to various databases."""
def __init__(self, **options):
self.options = options
self.setup_backend()
def setup_backend(self):
"""Setup the database backend."""
raise NotImplementedError()
def record_ticket(self, record, data, hash, app_id):
"""Records a log record as ticket."""
raise NotImplementedError()
def count_tickets(self):
"""Returns the number of tickets."""
raise NotImplementedError()
def get_tickets(self, order_by='-last_occurrence_time',
limit=50, offset=0):
"""Selects tickets from the database."""
raise NotImplementedError()
def solve_ticket(self, ticket_id):
"""Marks a ticket as solved."""
raise NotImplementedError()
def delete_ticket(self, ticket_id):
"""Deletes a ticket from the database."""
raise NotImplementedError()
def get_ticket(self, ticket_id):
"""Return a single ticket with all occurrences."""
raise NotImplementedError()
def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0):
"""Selects occurrences from the database for a ticket."""
raise NotImplementedError()
class SQLAlchemyBackend(BackendBase):
"""Implements a backend that is writing into a database SQLAlchemy can
interface.
This backend takes some additional options:
`table_prefix`
an optional table prefix for all tables created by
the logbook ticketing handler.
`metadata`
an optional SQLAlchemy metadata object for the table creation.
`autocreate_tables`
can be set to `False` to disable the automatic
creation of the logbook tables.
"""
def setup_backend(self):
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker, scoped_session
engine_or_uri = self.options.pop('uri', None)
metadata = self.options.pop('metadata', None)
table_prefix = self.options.pop('table_prefix', 'logbook_')
if hasattr(engine_or_uri, 'execute'):
self.engine = engine_or_uri
else:
# Pool recycle keeps connections from going stale,
# which happens in MySQL Databases
# Pool size is a custom setting for our stack
self.engine = create_engine(engine_or_uri, convert_unicode=True,
pool_recycle=360, pool_size=1000)
# Create session factory using session maker
session = sessionmaker()
# Bind to the engine
session.configure(bind=self.engine)
# Scoped session is a thread safe solution for
# interaction with the Database
self.session = scoped_session(session)
if metadata is None:
metadata = MetaData()
self.table_prefix = table_prefix
self.metadata = metadata
self.create_tables()
if self.options.get('autocreate_tables', True):
self.metadata.create_all(bind=self.engine)
def create_tables(self):
"""Creates the tables required for the handler on the class and
metadata.
"""
import sqlalchemy as db
def table(name, *args, **kwargs):
return db.Table(self.table_prefix + name, self.metadata,
*args, **kwargs)
self.tickets = table('tickets',
db.Column('ticket_id', db.Integer,
primary_key=True),
db.Column('record_hash', db.String(40),
unique=True),
db.Column('level', db.Integer),
db.Column('channel', db.String(120)),
db.Column('location', db.String(512)),
db.Column('module', db.String(256)),
db.Column('last_occurrence_time', db.DateTime),
db.Column('occurrence_count', db.Integer),
db.Column('solved', db.Boolean),
db.Column('app_id', db.String(80)))
self.occurrences = table('occurrences',
db.Column('occurrence_id',
db.Integer, primary_key=True),
db.Column('ticket_id', db.Integer,
db.ForeignKey(self.table_prefix +
'tickets.ticket_id')),
db.Column('time', db.DateTime),
db.Column('data', db.Text),
db.Column('app_id', db.String(80)))
def _order(self, q, table, order_by):
if order_by[0] == '-':
return q.order_by(table.c[order_by[1:]].desc())
return q.order_by(table.c[order_by])
def record_ticket(self, record, data, hash, app_id):
"""Records a log record as ticket."""
# Can use the session instead of engine.connection and an explicit transaction
s = self.session
try:
q = self.tickets.select(self.tickets.c.record_hash == hash)
row = s.execute(q).fetchone()
if row is None:
row = s.execute(self.tickets.insert().values(
record_hash=hash,
level=record.level,
channel=record.channel or u(''),
location=u('%s:%d') % (record.filename, record.lineno),
module=record.module or u('<unknown>'),
occurrence_count=0,
solved=False,
app_id=app_id
))
ticket_id = row.inserted_primary_key[0]
else:
ticket_id = row['ticket_id']
s.execute(self.occurrences.insert()
.values(ticket_id=ticket_id,
time=record.time,
app_id=app_id,
data=json.dumps(data)))
s.execute(
self.tickets.update()
.where(self.tickets.c.ticket_id == ticket_id)
.values(occurrence_count=self.tickets.c.occurrence_count + 1,
last_occurrence_time=record.time,
solved=False))
s.commit()
except Exception:
s.rollback()
raise
# Closes the session and removes it from the pool
s.remove()
def count_tickets(self):
"""Returns the number of tickets."""
return self.engine.execute(self.tickets.count()).fetchone()[0]
def get_tickets(self, order_by='-last_occurrence_time', limit=50,
offset=0):
"""Selects tickets from the database."""
return [Ticket(self, row) for row in self.engine.execute(
self._order(self.tickets.select(), self.tickets, order_by)
.limit(limit).offset(offset)).fetchall()]
def solve_ticket(self, ticket_id):
"""Marks a ticket as solved."""
self.engine.execute(self.tickets.update()
.where(self.tickets.c.ticket_id == ticket_id)
.values(solved=True))
def delete_ticket(self, ticket_id):
"""Deletes a ticket from the database."""
self.engine.execute(self.occurrences.delete()
.where(self.occurrences.c.ticket_id == ticket_id))
self.engine.execute(self.tickets.delete()
.where(self.tickets.c.ticket_id == ticket_id))
def get_ticket(self, ticket_id):
"""Return a single ticket with all occurrences."""
row = self.engine.execute(self.tickets.select().where(
self.tickets.c.ticket_id == ticket_id)).fetchone()
if row is not None:
return Ticket(self, row)
def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0):
"""Selects occurrences from the database for a ticket."""
return [Occurrence(self, row) for row in
self.engine.execute(self._order(
self.occurrences.select()
.where(self.occurrences.c.ticket_id == ticket),
self.occurrences, order_by)
.limit(limit).offset(offset)).fetchall()]
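# Hedged construction sketch (not part of the upstream module): the options documented
# in the SQLAlchemyBackend docstring above are forwarded unchanged to setup_backend().
# The connection URI and prefix below are placeholders, not a tested configuration.
def _example_sqlalchemy_backend():
    return SQLAlchemyBackend(uri='postgresql://user:secret@localhost/logbook',
                             table_prefix='logbook_',
                             autocreate_tables=True)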
class MongoDBBackend(BackendBase):
"""Implements a backend that writes into a MongoDB database."""
class _FixedTicketClass(Ticket):
@property
def ticket_id(self):
return self._id
class _FixedOccurrenceClass(Occurrence):
def __init__(self, db, row):
self.update_from_dict(json.loads(row['data']))
self.db = db
self.time = row['time']
self.ticket_id = row['ticket_id']
self.occurrence_id = row['_id']
# TODO: Update connection setup once PYTHON-160 is solved.
def setup_backend(self):
from pymongo import ASCENDING, DESCENDING
from pymongo.connection import Connection
try:
from pymongo.uri_parser import parse_uri
except ImportError:
from pymongo.connection import _parse_uri as parse_uri
from pymongo.errors import AutoReconnect
_connection = None
uri = self.options.pop('uri', u(''))
_connection_attempts = 0
parsed_uri = parse_uri(uri, Connection.PORT)
if type(parsed_uri) is tuple:
# pymongo < 2.0
database = parsed_uri[1]
else:
# pymongo >= 2.0
database = parsed_uri['database']
# Handle auto reconnect signals properly
while _connection_attempts < 5:
try:
if _connection is None:
_connection = Connection(uri)
database = _connection[database]
break
except AutoReconnect:
_connection_attempts += 1
time.sleep(0.1)
self.database = database
# setup correct indexes
database.tickets.ensure_index([('record_hash', ASCENDING)],
unique=True)
database.tickets.ensure_index([('solved', ASCENDING),
('level', ASCENDING)])
database.occurrences.ensure_index([('time', DESCENDING)])
def _order(self, q, order_by):
from pymongo import ASCENDING, DESCENDING
col = '%s' % (order_by[0] == '-' and order_by[1:] or order_by)
if order_by[0] == '-':
return q.sort(col, DESCENDING)
return q.sort(col, ASCENDING)
def _oid(self, ticket_id):
from pymongo.objectid import ObjectId
return ObjectId(ticket_id)
def record_ticket(self, record, data, hash, app_id):
"""Records a log record as ticket."""
db = self.database
ticket = db.tickets.find_one({'record_hash': hash})
if not ticket:
doc = {
'record_hash': hash,
'level': record.level,
'channel': record.channel or u(''),
'location': u('%s:%d') % (record.filename,
record.lineno),
'module': record.module or u('<unknown>'),
'occurrence_count': 0,
'solved': False,
'app_id': app_id,
}
ticket_id = db.tickets.insert(doc)
else:
ticket_id = ticket['_id']
db.tickets.update({'_id': ticket_id}, {
'$inc': {
'occurrence_count': 1
},
'$set': {
'last_occurrence_time': record.time,
'solved': False
}
})
        # We store occurrences in a separate collection so that
        # we can optionally turn it into a capped collection.
db.occurrences.insert({
'ticket_id': self._oid(ticket_id),
'app_id': app_id,
'time': record.time,
'data': json.dumps(data),
})
def count_tickets(self):
"""Returns the number of tickets."""
return self.database.tickets.count()
def get_tickets(self, order_by='-last_occurrence_time', limit=50,
offset=0):
"""Selects tickets from the database."""
query = (self._order(self.database.tickets.find(), order_by)
.limit(limit).skip(offset))
return [self._FixedTicketClass(self, obj) for obj in query]
def solve_ticket(self, ticket_id):
"""Marks a ticket as solved."""
self.database.tickets.update({'_id': self._oid(ticket_id)},
{'solved': True})
def delete_ticket(self, ticket_id):
"""Deletes a ticket from the database."""
self.database.occurrences.remove({'ticket_id': self._oid(ticket_id)})
self.database.tickets.remove({'_id': self._oid(ticket_id)})
def get_ticket(self, ticket_id):
"""Return a single ticket with all occurrences."""
ticket = self.database.tickets.find_one({'_id': self._oid(ticket_id)})
if ticket:
return Ticket(self, ticket)
def get_occurrences(self, ticket, order_by='-time', limit=50, offset=0):
"""Selects occurrences from the database for a ticket."""
collection = self.database.occurrences
occurrences = self._order(collection.find(
{'ticket_id': self._oid(ticket)}
), order_by).limit(limit).skip(offset)
return [self._FixedOccurrenceClass(self, obj) for obj in occurrences]
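# Note (added comment): MongoDBBackend is normally selected through the
# TicketingHandler defined further below rather than instantiated directly,
# e.g. ``TicketingHandler('mongodb://localhost/logs', backend=MongoDBBackend)``;
# the URI shown here is only a made-up example.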
class TicketingBaseHandler(Handler, HashingHandlerMixin):
"""Baseclass for ticketing handlers. This can be used to interface
ticketing systems that do not necessarily provide an interface that
would be compatible with the :class:`BackendBase` interface.
"""
def __init__(self, hash_salt, level=NOTSET, filter=None, bubble=False):
Handler.__init__(self, level, filter, bubble)
self.hash_salt = hash_salt
def hash_record_raw(self, record):
"""Returns the unique hash of a record."""
hash = HashingHandlerMixin.hash_record_raw(self, record)
if self.hash_salt is not None:
hash_salt = self.hash_salt
if not PY2 or isinstance(hash_salt, unicode):
hash_salt = hash_salt.encode('utf-8')
hash.update(b('\x00') + hash_salt)
return hash
class TicketingHandler(TicketingBaseHandler):
"""A handler that writes log records into a remote database. This
database can be connected to from different dispatchers which makes
this a nice setup for web applications::
from logbook.ticketing import TicketingHandler
handler = TicketingHandler('sqlite:////tmp/myapp-logs.db')
:param uri: a backend specific string or object to decide where to log to.
:param app_id: a string with an optional ID for an application. Can be
used to keep multiple application setups apart when logging
into the same database.
:param hash_salt: an optional salt (binary string) for the hashes.
:param backend: A backend class that implements the proper database
handling.
Backends available are: :class:`SQLAlchemyBackend`,
:class:`MongoDBBackend`.
"""
#: The default backend that is being used when no backend is specified.
    #: Unless overridden by a subclass this will be the
#: :class:`SQLAlchemyBackend`.
default_backend = SQLAlchemyBackend
def __init__(self, uri, app_id='generic', level=NOTSET,
filter=None, bubble=False, hash_salt=None, backend=None,
**db_options):
if hash_salt is None:
hash_salt = u('apphash-') + app_id
TicketingBaseHandler.__init__(self, hash_salt, level, filter, bubble)
if backend is None:
backend = self.default_backend
db_options['uri'] = uri
self.set_backend(backend, **db_options)
self.app_id = app_id
def set_backend(self, cls, **options):
self.db = cls(**options)
def process_record(self, record, hash):
"""Subclasses can override this to tamper with the data dict that
is sent to the database as JSON.
"""
return record.to_dict(json_safe=True)
def record_ticket(self, record, data, hash):
"""Record either a new ticket or a new occurrence for a
ticket based on the hash.
"""
self.db.record_ticket(record, data, hash, self.app_id)
def emit(self, record):
"""Emits a single record and writes it to the database."""
hash = self.hash_record(record).encode('utf-8')
data = self.process_record(record, hash)
self.record_ticket(record, data, hash)
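def _ticketing_usage_sketch():
    """Hedged usage sketch, not part of the original module.
    Shows one way the TicketingHandler above can be wired up with its default
    SQLAlchemyBackend and then queried through that backend.  The database
    URI, app_id and logger name are made-up example values; the attribute
    names on the returned tickets mirror the ticket columns created above.
    """
    import logbook
    handler = TicketingHandler('sqlite:////tmp/myapp-logs.db', app_id='demo')
    log = logbook.Logger('demo')
    with handler.applicationbound():
        log.error('something broke')
    # query the backend API defined in this module
    tickets = handler.db.get_tickets(limit=10)
    return handler.db.count_tickets(), [t.ticket_id for t in tickets]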
|
RazerM/logbook
|
logbook/ticketing.py
|
Python
|
bsd-3-clause
| 19,231
|
"""
ML Estimation of Spatial Error Model
"""
__author__ = "Luc Anselin luc.anselin@asu.edu, Serge Rey srey@asu.edu"
import numpy as np
import numpy.linalg as la
import pysal as ps
from pysal.spreg.utils import RegressionPropsY, RegressionPropsVM
import diagnostics as DIAG
import user_output as USER
import summary_output as SUMMARY
import regimes as REGI
from w_utils import symmetrize
try:
from scipy.optimize import minimize_scalar
minimize_scalar_available = True
except ImportError:
minimize_scalar_available = False
__all__ = ["ML_Error"]
class BaseML_Error(RegressionPropsY, RegressionPropsVM, REGI.Regimes_Frame):
"""
ML estimation of the spatial error model (note no consistency
checks, diagnostics or constants added); Anselin (1988) [1]_
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : Sparse matrix
Spatial weights sparse matrix
method : string
if 'full', brute force calculation (full matrix expressions)
epsilon : float
                   tolerance criterion in the minimize_scalar function and inverse_product
regimes_att : dictionary
Dictionary containing elements to be used in case of a regimes model,
i.e. 'x' before regimes, 'regimes' list and 'cols2regi'
Attributes
----------
betas : array
kx1 array of estimated coefficients
lam : float
estimate of spatial autoregressive coefficient
u : array
nx1 array of residuals
e_filtered : array
spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant, excluding the rho)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
method : string
log Jacobian method
if 'full': brute force (full matrix computations)
if 'ord' : Ord eigenvalue method
epsilon : float
tolerance criterion used in minimize_scalar function and inverse_product
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
vm : array
Variance covariance matrix (k+1 x k+1) - includes lambda
vm1 : array
2x2 array of variance covariance for lambda, sigma
sig2 : float
Sigma squared used in computations
logll : float
maximized log-likelihood (including constant terms)
Examples
--------
>>> import numpy as np
>>> import pysal as ps
>>> np.set_printoptions(suppress=True) #prevent scientific format
>>> db = ps.open(ps.examples.get_path("south.dbf"),'r')
>>> y_name = "HR90"
>>> y = np.array(db.by_col(y_name))
>>> y.shape = (len(y),1)
>>> x_names = ["RD90","PS90","UE90","DV90"]
>>> x = np.array([db.by_col(var) for var in x_names]).T
>>> x = np.hstack((np.ones((len(y),1)),x))
>>> ww = ps.open(ps.examples.get_path("south_q.gal"))
>>> w = ww.read()
>>> ww.close()
>>> w.transform = 'r'
>>> mlerr = BaseML_Error(y,x,w) #doctest: +SKIP
>>> "{0:.6f}".format(mlerr.lam) #doctest: +SKIP
'0.299078'
>>> np.around(mlerr.betas, decimals=4) #doctest: +SKIP
array([[ 6.1492],
[ 4.4024],
[ 1.7784],
[-0.3781],
[ 0.4858],
[ 0.2991]])
>>> "{0:.6f}".format(mlerr.mean_y) #doctest: +SKIP
'9.549293'
>>> "{0:.6f}".format(mlerr.std_y) #doctest: +SKIP
'7.038851'
>>> np.diag(mlerr.vm) #doctest: +SKIP
array([ 1.06476526, 0.05548248, 0.04544514, 0.00614425, 0.01481356,
0.00143001])
>>> "{0:.6f}".format(mlerr.sig2[0][0]) #doctest: +SKIP
'32.406854'
>>> "{0:.6f}".format(mlerr.logll) #doctest: +SKIP
'-4471.407067'
>>> mlerr1 = BaseML_Error(y,x,w,method='ord') #doctest: +SKIP
>>> "{0:.6f}".format(mlerr1.lam) #doctest: +SKIP
'0.299078'
>>> np.around(mlerr1.betas, decimals=4) #doctest: +SKIP
array([[ 6.1492],
[ 4.4024],
[ 1.7784],
[-0.3781],
[ 0.4858],
[ 0.2991]])
>>> "{0:.6f}".format(mlerr1.mean_y) #doctest: +SKIP
'9.549293'
>>> "{0:.6f}".format(mlerr1.std_y) #doctest: +SKIP
'7.038851'
>>> np.around(np.diag(mlerr1.vm), decimals=4) #doctest: +SKIP
array([ 1.0648, 0.0555, 0.0454, 0.0061, 0.0148, 0.0014])
>>> "{0:.4f}".format(mlerr1.sig2[0][0]) #doctest: +SKIP
'32.4069'
>>> "{0:.4f}".format(mlerr1.logll) #doctest: +SKIP
'-4471.4071'
References
----------
.. [1] Anselin, L. (1988) "Spatial Econometrics: Methods and Models".
Kluwer Academic Publishers. Dordrecht.
"""
def __init__(self, y, x, w, method='full', epsilon=0.0000001, regimes_att=None):
# set up main regression variables and spatial filters
self.y = y
if regimes_att:
self.x = x.toarray()
else:
self.x = x
self.n, self.k = self.x.shape
self.method = method
self.epsilon = epsilon
W = w.full()[0]
ylag = ps.lag_spatial(w, self.y)
xlag = self.get_x_lag(w, regimes_att)
# call minimizer using concentrated log-likelihood to get lambda
methodML = method.upper()
if methodML in ['FULL', 'ORD']:
if methodML == 'FULL':
res = minimize_scalar(err_c_loglik, 0.0, bounds=(-1.0, 1.0),
args=(self.n, self.y, ylag, self.x,
xlag, W), method='bounded',
tol=epsilon)
elif methodML == 'ORD':
# check on symmetry structure
if w.asymmetry(intrinsic=False) == []:
ww = symmetrize(w)
WW = ww.todense()
evals = la.eigvalsh(WW)
else:
evals = la.eigvals(W)
res = minimize_scalar(
err_c_loglik_ord, 0.0, bounds=(-1.0, 1.0),
args=(self.n, self.y, ylag, self.x,
xlag, evals), method='bounded',
tol=epsilon)
else:
raise Exception, "{0} is an unsupported method".format(method)
self.lam = res.x
# compute full log-likelihood, including constants
ln2pi = np.log(2.0 * np.pi)
llik = -res.fun - self.n / 2.0 * ln2pi - self.n / 2.0
self.logll = llik
# b, residuals and predicted values
ys = self.y - self.lam * ylag
xs = self.x - self.lam * xlag
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
b = np.dot(xsxsi, xsys)
self.betas = np.vstack((b, self.lam))
self.u = y - np.dot(self.x, b)
self.predy = self.y - self.u
# residual variance
self.e_filtered = self.u - self.lam * ps.lag_spatial(w, self.u)
self.sig2 = np.dot(self.e_filtered.T, self.e_filtered) / self.n
# variance-covariance matrix betas
varb = self.sig2 * xsxsi
# variance-covariance matrix lambda, sigma
a = -self.lam * W
np.fill_diagonal(a, 1.0)
ai = la.inv(a)
wai = np.dot(W, ai)
tr1 = np.trace(wai)
wai2 = np.dot(wai, wai)
tr2 = np.trace(wai2)
waiTwai = np.dot(wai.T, wai)
tr3 = np.trace(waiTwai)
v1 = np.vstack((tr2 + tr3,
tr1 / self.sig2))
v2 = np.vstack((tr1 / self.sig2,
self.n / (2.0 * self.sig2 ** 2)))
v = np.hstack((v1, v2))
self.vm1 = np.linalg.inv(v)
# create variance matrix for beta, lambda
vv = np.hstack((varb, np.zeros((self.k, 1))))
vv1 = np.hstack(
(np.zeros((1, self.k)), self.vm1[0, 0] * np.ones((1, 1))))
self.vm = np.vstack((vv, vv1))
self._cache = {}
def get_x_lag(self, w, regimes_att):
if regimes_att:
xlag = ps.lag_spatial(w, regimes_att['x'])
xlag = REGI.Regimes_Frame.__init__(self, xlag,
regimes_att['regimes'], constant_regi=None, cols2regi=regimes_att['cols2regi'])[0]
xlag = xlag.toarray()
else:
xlag = ps.lag_spatial(w, self.x)
return xlag
class ML_Error(BaseML_Error):
"""
    ML estimation of the spatial error model with all results and diagnostics;
Anselin (1988) [1]_
Parameters
----------
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, excluding the constant
w : Sparse matrix
Spatial weights sparse matrix
method : string
if 'full', brute force calculation (full matrix expressions)
                   if 'ord', Ord eigenvalue method
epsilon : float
                   tolerance criterion in the minimize_scalar function and inverse_product
spat_diag : boolean
if True, include spatial diagnostics
vm : boolean
if True, include variance-covariance matrix in summary
results
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
Attributes
----------
betas : array
(k+1)x1 array of estimated coefficients (rho first)
lam : float
estimate of spatial autoregressive coefficient
u : array
nx1 array of residuals
e_filtered : array
nx1 array of spatially filtered residuals
predy : array
nx1 array of predicted y values
n : integer
Number of observations
k : integer
Number of variables for which coefficients are estimated
(including the constant, excluding lambda)
y : array
nx1 array for dependent variable
x : array
Two dimensional array with n rows and one column for each
independent (exogenous) variable, including the constant
method : string
log Jacobian method
if 'full': brute force (full matrix computations)
epsilon : float
tolerance criterion used in minimize_scalar function and inverse_product
mean_y : float
Mean of dependent variable
std_y : float
Standard deviation of dependent variable
varb : array
Variance covariance matrix (k+1 x k+1) - includes var(lambda)
vm1 : array
variance covariance matrix for lambda, sigma (2 x 2)
sig2 : float
Sigma squared used in computations
logll : float
maximized log-likelihood (including constant terms)
pr2 : float
Pseudo R squared (squared correlation between y and ypred)
utu : float
Sum of squared residuals
std_err : array
1xk array of standard errors of the betas
z_stat : list of tuples
z statistic; each tuple contains the pair (statistic,
p-value), where each is a float
name_y : string
Name of dependent variable for use in output
name_x : list of strings
Names of independent variables for use in output
name_w : string
Name of weights matrix for use in output
name_ds : string
Name of dataset for use in output
title : string
Name of the regression method used
Examples
--------
>>> import numpy as np
>>> import pysal as ps
>>> np.set_printoptions(suppress=True) #prevent scientific format
>>> db = ps.open(ps.examples.get_path("south.dbf"),'r')
>>> ds_name = "south.dbf"
>>> y_name = "HR90"
>>> y = np.array(db.by_col(y_name))
>>> y.shape = (len(y),1)
>>> x_names = ["RD90","PS90","UE90","DV90"]
>>> x = np.array([db.by_col(var) for var in x_names]).T
>>> ww = ps.open(ps.examples.get_path("south_q.gal"))
>>> w = ww.read()
>>> ww.close()
>>> w_name = "south_q.gal"
>>> w.transform = 'r'
>>> mlerr = ML_Error(y,x,w,name_y=y_name,name_x=x_names,\
name_w=w_name,name_ds=ds_name) #doctest: +SKIP
>>> np.around(mlerr.betas, decimals=4) #doctest: +SKIP
array([[ 6.1492],
[ 4.4024],
[ 1.7784],
[-0.3781],
[ 0.4858],
[ 0.2991]])
>>> "{0:.4f}".format(mlerr.lam) #doctest: +SKIP
'0.2991'
>>> "{0:.4f}".format(mlerr.mean_y) #doctest: +SKIP
'9.5493'
>>> "{0:.4f}".format(mlerr.std_y) #doctest: +SKIP
'7.0389'
>>> np.around(np.diag(mlerr.vm), decimals=4) #doctest: +SKIP
array([ 1.0648, 0.0555, 0.0454, 0.0061, 0.0148, 0.0014])
>>> np.around(mlerr.sig2, decimals=4) #doctest: +SKIP
array([[ 32.4069]])
>>> "{0:.4f}".format(mlerr.logll) #doctest: +SKIP
'-4471.4071'
>>> "{0:.4f}".format(mlerr.aic) #doctest: +SKIP
'8952.8141'
>>> "{0:.4f}".format(mlerr.schwarz) #doctest: +SKIP
'8979.0779'
>>> "{0:.4f}".format(mlerr.pr2) #doctest: +SKIP
'0.3058'
>>> "{0:.4f}".format(mlerr.utu) #doctest: +SKIP
'48534.9148'
>>> np.around(mlerr.std_err, decimals=4) #doctest: +SKIP
array([ 1.0319, 0.2355, 0.2132, 0.0784, 0.1217, 0.0378])
>>> np.around(mlerr.z_stat, decimals=4) #doctest: +SKIP
array([[ 5.9593, 0. ],
[ 18.6902, 0. ],
[ 8.3422, 0. ],
[ -4.8233, 0. ],
[ 3.9913, 0.0001],
[ 7.9089, 0. ]])
>>> mlerr.name_y #doctest: +SKIP
'HR90'
>>> mlerr.name_x #doctest: +SKIP
['CONSTANT', 'RD90', 'PS90', 'UE90', 'DV90', 'lambda']
>>> mlerr.name_w #doctest: +SKIP
'south_q.gal'
>>> mlerr.name_ds #doctest: +SKIP
'south.dbf'
>>> mlerr.title #doctest: +SKIP
'MAXIMUM LIKELIHOOD SPATIAL ERROR (METHOD = FULL)'
References
----------
.. [1] Anselin, L. (1988) "Spatial Econometrics: Methods and Models".
Kluwer Academic Publishers. Dordrecht.
"""
def __init__(self, y, x, w, method='full', epsilon=0.0000001,
spat_diag=False, vm=False, name_y=None, name_x=None,
name_w=None, name_ds=None):
n = USER.check_arrays(y, x)
USER.check_y(y, n)
USER.check_weights(w, y, w_required=True)
x_constant = USER.check_constant(x)
method = method.upper()
if method in ['FULL', 'ORD']:
BaseML_Error.__init__(self, y=y, x=x_constant,
w=w, method=method, epsilon=epsilon)
self.title = "MAXIMUM LIKELIHOOD SPATIAL ERROR" + \
" (METHOD = " + method + ")"
self.name_ds = USER.set_name_ds(name_ds)
self.name_y = USER.set_name_y(name_y)
self.name_x = USER.set_name_x(name_x, x)
self.name_x.append('lambda')
self.name_w = USER.set_name_w(name_w, w)
self.aic = DIAG.akaike(reg=self)
self.schwarz = DIAG.schwarz(reg=self)
SUMMARY.ML_Error(reg=self, w=w, vm=vm, spat_diag=spat_diag)
else:
raise Exception, "{0} is an unsupported method".format(method)
def err_c_loglik(lam, n, y, ylag, x, xlag, W):
# concentrated log-lik for error model, no constants, brute force
ys = y - lam * ylag
xs = x - lam * xlag
ysys = np.dot(ys.T, ys)
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
x1 = np.dot(xsxsi, xsys)
x2 = np.dot(xsys.T, x1)
ee = ysys - x2
sig2 = ee[0][0] / n
nlsig2 = (n / 2.0) * np.log(sig2)
a = -lam * W
np.fill_diagonal(a, 1.0)
jacob = np.log(np.linalg.det(a))
# this is the negative of the concentrated log lik for minimization
clik = nlsig2 - jacob
return clik
def err_c_loglik_ord(lam, n, y, ylag, x, xlag, evals):
    # concentrated log-lik for error model, no constants, Ord eigenvalue method
ys = y - lam * ylag
xs = x - lam * xlag
ysys = np.dot(ys.T, ys)
xsxs = np.dot(xs.T, xs)
xsxsi = np.linalg.inv(xsxs)
xsys = np.dot(xs.T, ys)
x1 = np.dot(xsxsi, xsys)
x2 = np.dot(xsys.T, x1)
ee = ysys - x2
sig2 = ee[0][0] / n
nlsig2 = (n / 2.0) * np.log(sig2)
revals = lam * evals
jacob = np.log(1 - revals).sum()
if isinstance(jacob, complex):
jacob = jacob.real
# this is the negative of the concentrated log lik for minimization
clik = nlsig2 - jacob
return clik
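# Note (added comment): both helpers above return the *negative* of the
# concentrated log-likelihood so that scipy's minimize_scalar can minimize it;
# err_c_loglik_ord avoids the dense log-determinant by using the eigenvalue
# identity log|I - lam*W| = sum(log(1 - lam*eigenvalues(W))) (Ord's method).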
def _test():
import doctest
start_suppress = np.get_printoptions()['suppress']
np.set_printoptions(suppress=True)
doctest.testmod()
np.set_printoptions(suppress=start_suppress)
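def _err_c_loglik_sketch():
    """Hedged illustration, not part of the original module.
    Evaluates err_c_loglik() above on a tiny made-up data set, making the
    quantity being minimized explicit:
        err_c_loglik(lam) = (n/2)*log(sig2(lam)) - log|I - lam*W|
    i.e. the negative concentrated log-likelihood used by the 'full' method.
    All numbers below are synthetic and only serve as an example.
    """
    rng = np.random.RandomState(0)
    n = 5
    # row-standardized "ring" weights: each unit has its two neighbours
    W = np.zeros((n, n))
    for i in range(n):
        W[i, (i - 1) % n] = 0.5
        W[i, (i + 1) % n] = 0.5
    x = np.hstack((np.ones((n, 1)), rng.randn(n, 1)))
    y = rng.randn(n, 1)
    ylag = np.dot(W, y)
    xlag = np.dot(W, x)
    return err_c_loglik(0.3, n, y, ylag, x, xlag, W)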
|
spreg-git/pysal
|
pysal/spreg/ml_error.py
|
Python
|
bsd-3-clause
| 18,341
|
#!/usr/bin/python
from distutils.core import setup
setup(
name='snapboard',
version='0.2.1',
author='Bo Shi',
maintainer='SNAPboard developers',
maintainer_email='snapboard-discuss@googlegroups.com',
url='http://code.google.com/p/snapboard/',
description='Bulletin board application for Django.',
    long_description='''SNAPboard is a forum/bulletin board application based on the Django web
framework. It integrates easily in any Django project.
Among its features are:
* Editable posts with all revisions publicly available
* Messages posted within threads can be made visible only to selected
users
* BBCode, Markdown and Textile supported for post formatting
* BBCode toolbar
* Multiple forums with four types of permissions
* Forum permissions can be assigned to custom groups of users
* Group administration can be delegated to end users on a per-group basis
* Moderators for each forum
* User preferences
* Watched topics
* Abuse reports
* User and IP address bans that don't automatically spread to other Django
applications within the project
* i18n hooks to create your own translations
* Included translations: French, Russian
SNAPboard requires Django 1.0.''',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: New BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications :: BBS',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: Message Boards',
],
packages=['snapboard',],
package_dir={'snapboard': 'snapboard'},
package_data={'snapboard': [
'media/*/*.*',
'media/*/*/*.*',
'templates/*.*',
'templates/snapboard/*.*',
'templates/notification/*.*',
'templates/notification/*/*.*',
]},
)
# vim: ai ts=4 sts=4 et sw=4
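# Hedged note, not part of the original file: with this distutils setup script
# a source distribution is typically built and installed with the standard
#   python setup.py sdist
#   python setup.py install
# commands; the exact invocation depends on the local Python/Django
# environment and nothing here is specific to SNAPboard beyond what setup()
# declares above.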
|
DuVale/snapboard
|
setup.py
|
Python
|
bsd-3-clause
| 2,058
|
"""Forest covertype dataset.
A classic dataset for classification benchmarks, featuring categorical and
real-valued features.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/datasets/Covertype
Courtesy of Jock A. Blackard and Colorado State University.
"""
# Author: Lars Buitinck
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
from gzip import GzipFile
import logging
from os.path import dirname, exists, join
from os import remove, makedirs
import numpy as np
import joblib
from . import get_data_home
from ._base import _convert_data_dataframe
from ._base import _fetch_remote
from ._base import RemoteFileMetadata
from ..utils import Bunch
from ._base import _pkl_filepath
from ..utils import check_random_state
from ..utils.validation import _deprecate_positional_args
# The original data can be found in:
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz
ARCHIVE = RemoteFileMetadata(
filename='covtype.data.gz',
url='https://ndownloader.figshare.com/files/5976039',
checksum=('614360d0257557dd1792834a85a1cdeb'
'fadc3c4f30b011d56afee7ffb5b15771'))
logger = logging.getLogger(__name__)
# Column names reference:
# https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.info
FEATURE_NAMES = ["Elevation",
"Aspect",
"Slope",
"Horizontal_Distance_To_Hydrology",
"Vertical_Distance_To_Hydrology",
"Horizontal_Distance_To_Roadways",
"Hillshade_9am",
"Hillshade_Noon",
"Hillshade_3pm",
"Horizontal_Distance_To_Fire_Points"]
FEATURE_NAMES += [f"Wilderness_Area_{i}" for i in range(4)]
FEATURE_NAMES += [f"Soil_Type_{i}" for i in range(40)]
TARGET_NAMES = ["Cover_Type"]
@_deprecate_positional_args
def fetch_covtype(*, data_home=None, download_if_missing=True,
random_state=None, shuffle=False, return_X_y=False,
as_frame=False):
"""Load the covertype dataset (classification).
Download it if necessary.
================= ============
Classes 7
Samples total 581012
Dimensionality 54
Features int
================= ============
Read more in the :ref:`User Guide <covtype_dataset>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
shuffle : bool, default=False
Whether to shuffle dataset.
return_X_y : bool, default=False
If True, returns ``(data.data, data.target)`` instead of a Bunch
object.
.. versionadded:: 0.20
as_frame : bool, default=False
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric). The target is a pandas DataFrame or
Series depending on the number of target columns. If `return_X_y` is
True, then (`data`, `target`) will be pandas DataFrames or Series as
described below.
.. versionadded:: 0.24
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (581012, 54)
Each row corresponds to the 54 features in the dataset.
target : ndarray of shape (581012,)
Each value corresponds to one of
the 7 forest covertypes with values
        ranging from 1 to 7.
frame : dataframe of shape (581012, 53)
Only present when `as_frame=True`. Contains `data` and `target`.
DESCR : str
Description of the forest covertype dataset.
feature_names : list
The names of the dataset columns.
target_names: list
The names of the target columns.
(data, target) : tuple if ``return_X_y`` is True
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
covtype_dir = join(data_home, "covertype")
samples_path = _pkl_filepath(covtype_dir, "samples")
targets_path = _pkl_filepath(covtype_dir, "targets")
available = exists(samples_path)
if download_if_missing and not available:
if not exists(covtype_dir):
makedirs(covtype_dir)
logger.info("Downloading %s" % ARCHIVE.url)
archive_path = _fetch_remote(ARCHIVE, dirname=covtype_dir)
Xy = np.genfromtxt(GzipFile(filename=archive_path), delimiter=',')
# delete archive
remove(archive_path)
X = Xy[:, :-1]
y = Xy[:, -1].astype(np.int32, copy=False)
joblib.dump(X, samples_path, compress=9)
joblib.dump(y, targets_path, compress=9)
elif not available and not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
try:
X, y
except NameError:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
if shuffle:
ind = np.arange(X.shape[0])
rng = check_random_state(random_state)
rng.shuffle(ind)
X = X[ind]
y = y[ind]
module_path = dirname(__file__)
with open(join(module_path, 'descr', 'covtype.rst')) as rst_file:
fdescr = rst_file.read()
frame = None
if as_frame:
frame, X, y = _convert_data_dataframe(caller_name="fetch_covtype",
data=X,
target=y,
feature_names=FEATURE_NAMES,
target_names=TARGET_NAMES)
if return_X_y:
return X, y
return Bunch(data=X,
target=y,
frame=frame,
target_names=TARGET_NAMES,
feature_names=FEATURE_NAMES,
DESCR=fdescr)
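def _fetch_covtype_sketch():
    """Hedged usage sketch, not part of the original module.
    Illustrates a typical call to fetch_covtype() defined above: the first
    call downloads and caches the data under data_home, later calls reuse the
    cached arrays.  random_state=0 is an arbitrary example value.
    """
    bunch = fetch_covtype(shuffle=True, random_state=0)
    # data: (581012, 54) feature matrix, target: (581012,) labels in 1..7
    return bunch.data.shape, bunch.target.shape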
|
xuewei4d/scikit-learn
|
sklearn/datasets/_covtype.py
|
Python
|
bsd-3-clause
| 6,567
|
from BinPy.combinational.combinational import *
|
rajathkumarmp/BinPy
|
BinPy/combinational/__init__.py
|
Python
|
bsd-3-clause
| 48
|
import datetime
from django.test import TestCase
from django.utils import timezone
from schedule.models import Event, Rule, Calendar, CalendarRelation
class TestCalendar(TestCase):
def setup(self):
pass
def __create_event(self, start, end):
data = {
'title': 'Recent Event',
'start': start,
'end': end
}
return Event.objects.create(**data)
def test_get_recent_events_without_events_is_empty(self):
calendar = Calendar()
self.assertEqual(list(calendar.get_recent()), [])
def test_get_recent_events_with_events_return_the_event(self):
pass
def test_occurrences_after_without_events_is_empty(self):
calendar = Calendar()
self.assertEqual(list(calendar.occurrences_after(timezone.now())), [])
def test_occurrences_after_with_events_after_returns_events(self):
calendar = Calendar()
calendar.save()
start_after = timezone.now() + datetime.timedelta(days=1)
end_after = start_after + datetime.timedelta(hours=1)
event = self.__create_event(start_after, end_after)
calendar.events.add(event)
occurrences = list(calendar.occurrences_after(timezone.now()))
self.assertEqual(len(occurrences), 1)
self.assertEqual(occurrences[0].start, start_after)
self.assertEqual(occurrences[0].end, end_after)
def test_occurrences_after_with_events_before_returns_empty(self):
calendar = Calendar()
calendar.save()
start_after = timezone.now() + datetime.timedelta(days=-1)
end_after = start_after + datetime.timedelta(hours=1)
event = self.__create_event(start_after, end_after)
calendar.events.add(event)
occurrences = list(calendar.occurrences_after(timezone.now()))
self.assertEqual(occurrences, [])
# self.assertEqual(list(calendar.occurrences_after(timezone.now())), [])
# def test_get_absolute_url(self):
# calendar = Calendar()
# self.assertEqual(calendar.get_absolute_url(), '')
def test_get_calendar_for_object(self):
calendar = Calendar(name='My Cal')
calendar.save()
rule = Rule()
rule.save()
calendar.create_relation(rule)
result = Calendar.objects.get_calendar_for_object(rule)
self.assertEqual(result.name, 'My Cal')
def test_get_calendar_for_object_without_calendars(self):
with self.assertRaises(Calendar.DoesNotExist):
rule = Rule()
rule.save()
Calendar.objects.get_calendar_for_object(rule)
def test_get_calendar_for_object_with_more_than_one_calendar(self):
calendar_1 = Calendar(name='My Cal 1')
calendar_1.save()
calendar_2 = Calendar(name='My Cal 2')
calendar_2.save()
rule = Rule()
rule.save()
calendar_1.create_relation(rule)
calendar_2.create_relation(rule)
with self.assertRaises(AssertionError):
result = Calendar.objects.get_calendar_for_object(rule)
def test_get_or_create_calendar_for_object_without_calendar(self):
"""
Creation test
"""
rule = Rule()
rule.save()
calendar = Calendar.objects.get_or_create_calendar_for_object(rule, name='My Cal')
self.assertEqual(calendar.name, 'My Cal')
calendar_from_rule = Calendar.objects.get_calendars_for_object(rule)[0]
self.assertEqual(calendar, calendar_from_rule)
    def test_get_or_create_calendar_for_object_without_name(self):
        """
        Test get_or_create_calendar_for_object when no explicit name is given
        """
rule = Rule()
rule.save()
calendar = Calendar.objects.get_or_create_calendar_for_object(rule)
calendar_from_rule = Calendar.objects.get_calendars_for_object(rule)[0]
self.assertEqual(calendar, calendar_from_rule)
def test_get_calendars_for_object_without_calendars(self):
rule = Rule()
rule.save()
calendar = Calendar.objects.get_or_create_calendar_for_object(rule, name='My Cal', distinction='owner')
rule = Rule()
rule.save()
calendars = list(Calendar.objects.get_calendars_for_object(rule, distinction='owner'))
self.assertEqual(len(calendars), 0)
def test_calendar_absolute_and_event_url(self):
"""
        This test does not assert anything on its own; it was added because a
        URL had been wired up with the wrong reverse name.
"""
rule = Rule()
rule.save()
calendar = Calendar.objects.get_or_create_calendar_for_object(rule, name='My Cal', distinction='owner')
abs_url = calendar.get_absolute_url()
calendar.add_event_url()
relation = CalendarRelation.objects.create_relation(calendar, rule)
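        # Note (added comment): there are no assertions here -- the test
        # passes as long as the calls above do not raise (e.g. because of a
        # wrong reverse name).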
|
GrahamDigital/django-scheduler
|
tests/test_calendar.py
|
Python
|
bsd-3-clause
| 4,809
|
#!/usr/bin/env python
'''
This program illustrates the use of findContours and drawContours.
The original image is put up along with the image of drawn contours.
Usage:
contours.py
A trackbar is put up which controls the contour level from -3 to 3
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2
def make_image():
img = np.zeros((500, 500), np.uint8)
black, white = 0, 255
for i in xrange(6):
dx = int((i%2)*250 - 30)
dy = int((i/2.)*150)
if i == 0:
for j in xrange(11):
angle = (j+5)*np.pi/21
c, s = np.cos(angle), np.sin(angle)
x1, y1 = np.int32([dx+100+j*10-80*c, dy+100-90*s])
x2, y2 = np.int32([dx+100+j*10-30*c, dy+100-30*s])
cv2.line(img, (x1, y1), (x2, y2), white)
cv2.ellipse( img, (dx+150, dy+100), (100,70), 0, 0, 360, white, -1 )
cv2.ellipse( img, (dx+115, dy+70), (30,20), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+185, dy+70), (30,20), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+115, dy+70), (15,15), 0, 0, 360, white, -1 )
cv2.ellipse( img, (dx+185, dy+70), (15,15), 0, 0, 360, white, -1 )
cv2.ellipse( img, (dx+115, dy+70), (5,5), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+185, dy+70), (5,5), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+150, dy+100), (10,5), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+150, dy+150), (40,10), 0, 0, 360, black, -1 )
cv2.ellipse( img, (dx+27, dy+100), (20,35), 0, 0, 360, white, -1 )
cv2.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 )
return img
if __name__ == '__main__':
print(__doc__)
img = make_image()
h, w = img.shape[:2]
_, contours0, hierarchy = cv2.findContours( img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = [cv2.approxPolyDP(cnt, 3, True) for cnt in contours0]
def update(levels):
vis = np.zeros((h, w, 3), np.uint8)
levels = levels - 3
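        # contourIdx is -1 (draw every contour) for positive levels and 2
        # otherwise; abs(levels) is passed as maxLevel and limits how deep
        # into the nesting hierarchy drawContours descends.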
cv2.drawContours( vis, contours, (-1, 2)[levels <= 0], (128,255,255),
3, cv2.LINE_AA, hierarchy, abs(levels) )
cv2.imshow('contours', vis)
update(3)
cv2.createTrackbar( "levels+3", "contours", 3, 7, update )
cv2.imshow('image', img)
cv2.waitKey()
cv2.destroyAllWindows()
|
makelove/OpenCV-Python-Tutorial
|
官方samples/contours.py
|
Python
|
mit
| 2,455
|
from toontown.coghq.SpecImports import *
GlobalEntities = {1000: {'type': 'levelMgr',
'name': 'LevelMgr',
'comment': '',
'parentEntId': 0,
'cogLevel': 0,
'farPlaneDistance': 1500,
'modelFilename': 'phase_11/models/lawbotHQ/LB_Zone04a',
'wantDoors': 1},
1001: {'type': 'editMgr',
'name': 'EditMgr',
'parentEntId': 0,
'insertEntity': None,
'removeEntity': None,
'requestNewEntity': None,
'requestSave': None},
0: {'type': 'zone',
'name': 'UberZone',
'comment': '',
'parentEntId': 0,
'scale': 1,
'description': '',
'visibility': []},
100018: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(54.7666, 7.03896, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 1, 1, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
100019: {'type': 'button',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-58.0835, 7.37219, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(3, 3, 3),
'color': Vec4(1, 1, 1, 1),
'isOn': 0,
'isOnEvent': 0,
'secondsOn': -1.0},
100015: {'type': 'door',
'name': '<unnamed>',
'comment': '',
'parentEntId': 100005,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'color': Vec4(1, 1, 1, 1),
'isLock0Unlocked': 0,
'isLock1Unlocked': 1,
'isLock2Unlocked': 0,
'isLock3Unlocked': 1,
'isOpen': 0,
'isOpenEvent': 0,
'isVisBlocker': 1,
'secondsOpen': 1,
'unlock0Event': 100016,
'unlock1Event': 0,
'unlock2Event': 100017,
'unlock3Event': 0},
100016: {'type': 'laserField',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(-15.1345, -13.2285, 0.25),
'hpr': Point3(90, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 0,
'gridGame': 'Random',
'gridScaleX': 42.0,
'gridScaleY': 40.0,
'laserFactor': 3,
'modelPath': 0,
'projector': Point3(20, 40, 45),
'switchId': 100019},
100017: {'type': 'laserField',
'name': '<unnamed>',
'comment': '',
'parentEntId': 0,
'pos': Point3(11.2941, 28.7739, 0.28),
'hpr': Vec3(270, 0, 0),
'scale': Vec3(1, 1, 1),
'cellId': 1,
'gridGame': 'Random',
'gridScaleX': 42.0,
'gridScaleY': 40.0,
'laserFactor': 3,
'modelPath': 0,
'projector': Point3(20, 40, 45),
'switchId': 100018},
100001: {'type': 'model',
'name': 'copy of partition (3)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(-8.98508, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100002: {'type': 'model',
'name': 'copy of partition (4)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(5.36486, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100003: {'type': 'model',
'name': 'copy of partition (5)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(20.1513, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100004: {'type': 'model',
'name': 'copy of partition (6)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(34.9439, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100007: {'type': 'model',
'name': 'copy of partition',
'comment': '',
'parentEntId': 100006,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100008: {'type': 'model',
'name': 'copy of partition (2)',
'comment': '',
'parentEntId': 100006,
'pos': Point3(-14.9029, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100009: {'type': 'model',
'name': 'copy of partition (3)',
'comment': '',
'parentEntId': 100006,
'pos': Point3(-29.7119, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100010: {'type': 'model',
'name': 'copy of partition (4)',
'comment': '',
'parentEntId': 100006,
'pos': Point3(-44.4821, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100012: {'type': 'model',
'name': 'copy of partition (3)',
'comment': '',
'parentEntId': 100011,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100013: {'type': 'model',
'name': 'copy of partition (4)',
'comment': '',
'parentEntId': 100011,
'pos': Point3(-14.9149, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100014: {'type': 'model',
'name': 'copy of partition (5)',
'comment': '',
'parentEntId': 100011,
'pos': Point3(-29.7289, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100023: {'type': 'model',
'name': 'copy of partition (6)',
'comment': '',
'parentEntId': 100011,
'pos': Point3(-44.4361, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100025: {'type': 'model',
'name': 'copy of partition (7)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(42.3323, -38.4749, 0),
'hpr': Vec3(270, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100026: {'type': 'model',
'name': 'copy of partition (8)',
'comment': '',
'parentEntId': 100000,
'pos': Point3(42.3323, -25.7861, 0),
'hpr': Vec3(270, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100027: {'type': 'model',
'name': 'copy of partition (5)',
'comment': '',
'parentEntId': 100006,
'pos': Point3(-52.4944, -39.8953, 0),
'hpr': Vec3(270.818, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100028: {'type': 'model',
'name': 'copy of partition (6)',
'comment': '',
'parentEntId': 100006,
'pos': Point3(-52.4944, -25.343, 0),
'hpr': Vec3(270.818, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100029: {'type': 'model',
'name': 'scenary',
'comment': '',
'parentEntId': 0,
'pos': Point3(-38.0023, -20.533, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_couchA.bam'},
100030: {'type': 'model',
'name': 'copy of scenary',
'comment': '',
'parentEntId': 0,
'pos': Point3(-28.9763, -28.2505, 0),
'hpr': Vec3(270, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_couchA.bam'},
100031: {'type': 'model',
'name': 'copy of scenary (2)',
'comment': '',
'parentEntId': 0,
'pos': Point3(-51.8392, -28.2505, 0),
'hpr': Vec3(131.894, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfB.bam'},
100032: {'type': 'model',
'name': 'copy of scenary (3)',
'comment': '',
'parentEntId': 0,
'pos': Point3(-44.1843, -37.0309, 0),
'hpr': Vec3(131.894, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfB.bam'},
100033: {'type': 'model',
'name': 'copy of scenary (4)',
'comment': '',
'parentEntId': 100034,
'pos': Point3(16.5447, -16.1896, 0),
'hpr': Vec3(358.668, 0, 0),
'scale': Point3(2, 2, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_filing_cabA.bam'},
100037: {'type': 'model',
'name': 'partition',
'comment': '',
'parentEntId': 100036,
'pos': Point3(-14.6846, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100038: {'type': 'model',
'name': 'copy of partition',
'comment': '',
'parentEntId': 100036,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100039: {'type': 'model',
'name': 'copy of partition',
'comment': '',
'parentEntId': 100036,
'pos': Point3(15.087, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100040: {'type': 'model',
'name': 'copy of partition (2)',
'comment': '',
'parentEntId': 100036,
'pos': Point3(-29.9877, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100041: {'type': 'model',
'name': 'copy of scenary (5)',
'comment': '',
'parentEntId': 100034,
'pos': Point3(20.0815, -16.1896, 0),
'hpr': Vec3(358.668, 0, 0),
'scale': Point3(2, 2, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_filing_cabA.bam'},
100042: {'type': 'model',
'name': 'copy of scenary (6)',
'comment': '',
'parentEntId': 100034,
'pos': Point3(31.1973, -15.8974, 0),
'hpr': Vec3(358.668, 0, 0),
'scale': Point3(1, 1, 1),
'collisionsOnly': 0,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/LB_bookshelfA.bam'},
100043: {'type': 'model',
'name': 'box',
'comment': '',
'parentEntId': 100000,
'pos': Point3(-10.7296, -3.52948, 0),
'hpr': Vec3(270.924, 0, 0),
'scale': Point3(2, 8, 2),
'collisionsOnly': 1,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100044: {'type': 'model',
'name': 'copy of box',
'comment': '',
'parentEntId': 100036,
'pos': Point3(-32.0389, 7.09736, 0),
'hpr': Vec3(90, 0, 0),
'scale': Point3(1, 8, 2),
'collisionsOnly': 1,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100045: {'type': 'model',
'name': 'copy of box (2)',
'comment': '',
'parentEntId': 100006,
'pos': Point3(2.05698, -7.92436, 0),
'hpr': Vec3(270, 0, 0),
'scale': Point3(2, 8, 2),
'collisionsOnly': 1,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
100046: {'type': 'model',
'name': 'copy of box (3)',
'comment': '',
'parentEntId': 100011,
'pos': Point3(1.66589, 6.97288, 0),
'hpr': Vec3(270, 0, 0),
'scale': Point3(2, 8, 2),
'collisionsOnly': 1,
'flattenType': 'light',
'loadType': 'loadModelCopy',
'modelPath': 'phase_11/models/lawbotHQ/PartitionA.bam'},
10000: {'type': 'nodepath',
'name': '0',
'comment': '',
'parentEntId': 100035,
'pos': Point3(-64.0541, 7.6285, 0),
'hpr': Vec3(90, 0, 0),
'scale': Point3(1, 1, 1)},
100020: {'type': 'nodepath',
'name': 'battle place 1',
'comment': '',
'parentEntId': 0,
'pos': Point3(-35.0928, 6.21518, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100021: {'type': 'nodepath',
'name': 'cogparent2',
'comment': '',
'parentEntId': 0,
'pos': Point3(64.2291, 5.46719, 0),
'hpr': Vec3(270, 0, 0),
'scale': Vec3(1, 1, 1)},
100022: {'type': 'nodepath',
'name': 'BattlePlace2',
'comment': '',
'parentEntId': 0,
'pos': Point3(32.4809, 7.68949, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100035: {'type': 'nodepath',
'name': 'cogparent1',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1)},
100000: {'type': 'rendering',
'name': 'PartitionWall2',
'comment': '',
'parentEntId': 0,
'pos': Point3(-53.71, -14.4718, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'blending': 'Alpha',
'colorA': 1.0,
'colorB': 1.0,
'colorG': 1.0,
'colorR': 1.0,
'fogOn': 0,
'renderBin': 'transparent'},
100005: {'type': 'rendering',
'name': 'doorparent',
'comment': '',
'parentEntId': 0,
'pos': Point3(-0.845594, 73.9927, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'blending': 'Normal',
'colorA': 1.0,
'colorB': 0.75,
'colorG': 0.75,
'colorR': 0.8,
'fogOn': 0,
'renderBin': 'transparent'},
100006: {'type': 'rendering',
'name': 'wallparent3',
'comment': '',
'parentEntId': 0,
'pos': Point3(61.7299, -14.6471, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'blending': 'Alpha',
'colorA': 1.0,
'colorB': 1.0,
'colorG': 1.0,
'colorR': 1.0,
'fogOn': 0,
'renderBin': 'transparent'},
100011: {'type': 'rendering',
'name': 'wallprent4',
'comment': '',
'parentEntId': 0,
'pos': Point3(62.3555, 30.4101, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'blending': 'Alpha',
'colorA': 1.0,
'colorB': 1.0,
'colorG': 1.0,
'colorR': 1.0,
'fogOn': 0,
'renderBin': 'transparent'},
100024: {'type': 'rendering',
'name': 'cogrenderingparent2',
'comment': '',
'parentEntId': 0,
'pos': Point3(64.2291, 5.46719, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'blending': 'Additive',
'colorA': 1.0,
'colorB': 0.0,
'colorG': 0.0,
'colorR': 1.0,
'fogOn': 0,
'renderBin': 'fixed'},
100034: {'type': 'rendering',
'name': 'scenaryLight',
'comment': '',
'parentEntId': 0,
'pos': Point3(0, 0, 0),
'hpr': Vec3(0, 0, 0),
'scale': 1,
'blending': 'Alpha',
'colorA': 1.0,
'colorB': 0.5,
'colorG': 0.5,
'colorR': 0.6,
'fogOn': 0,
'renderBin': 'transparent'},
100036: {'type': 'rendering',
'name': 'partitionwall1',
'comment': '',
'parentEntId': 0,
'pos': Point3(-33.8745, 29.3783, 0),
'hpr': Vec3(0, 0, 0),
'scale': Vec3(1, 1, 1),
'blending': 'Normal',
'colorA': 1.0,
'colorB': 1.0,
'colorG': 1.0,
'colorR': 1.0,
'fogOn': 0,
'renderBin': 'transparent'}}
Scenario0 = {}
levelSpec = {'globalEntities': GlobalEntities,
'scenarios': [Scenario0]}
|
Spiderlover/Toontown
|
toontown/coghq/LawbotOfficeLobby_Trap00.py
|
Python
|
mit
| 19,705
|
#!/usr/bin/env python
import ibus
import dbus
bus = dbus.SessionBus()
e = ibus.interface.IPanel()
print e.Introspect("/", bus)
|
ibus/ibus-qt
|
src/interfaces/introspect_panel.py
|
Python
|
gpl-2.0
| 130
|
# #
# Copyright 2012-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for tools/variables.py.
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
from test.framework.utilities import EnhancedTestCase
from unittest import TestLoader, main
from easybuild.tools.variables import CommaList, StrList, Variables
from easybuild.tools.toolchain.variables import CommandFlagList
class VariablesTest(EnhancedTestCase):
""" Baseclass for easyblock testcases """
def test_variables(self):
class TestVariables(Variables):
MAP_CLASS = {'FOO':CommaList}
v = TestVariables()
self.assertEqual(str(v), "{}")
# DEFAULTCLASS is StrList
v['BAR'] = range(3)
self.assertEqual(str(v), "{'BAR': [[0, 1, 2]]}")
self.assertEqual(str(v['BAR']), "0 1 2")
v['BAR'].append(StrList(range(10, 12)))
self.assertEqual(str(v['BAR']), "0 1 2 10 11")
v.nappend('BAR', 20)
self.assertEqual(str(v['BAR']), "0 1 2 10 11 20")
v.nappend_el('BAR', 30, idx= -2)
self.assertEqual(str(v), "{'BAR': [[0, 1, 2], [10, 11, 30], [20]]}")
self.assertEqual(str(v['BAR']), '0 1 2 10 11 30 20')
v['FOO'] = range(3)
self.assertEqual(str(v['FOO']), "0,1,2")
v['BARSTR'] = 'XYZ'
self.assertEqual(v['BARSTR'].__repr__(), "[['XYZ']]")
v['BARINT'] = 0
self.assertEqual(v['BARINT'].__repr__(), "[[0]]")
v.join('BAR2', 'FOO', 'BARINT')
self.assertEqual(str(v['BAR2']), "0,1,2 0")
self.assertErrorRegex(Exception, 'not found in self', v.join, 'BAZ', 'DOESNOTEXIST')
cmd = CommandFlagList(["gcc", "bar", "baz"])
self.assertEqual(str(cmd), "gcc -bar -baz")
def test_empty_variables(self):
"""Test playing around with empty variables."""
v = Variables()
v.nappend('FOO', [])
self.assertEqual(v['FOO'], [])
v.join('BAR', 'FOO')
self.assertEqual(v['BAR'], [])
v.join('FOOBAR', 'BAR')
self.assertEqual(v['FOOBAR'], [])
def suite():
""" return all the tests"""
return TestLoader().loadTestsFromTestCase(VariablesTest)
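def _variables_sketch():
    """Hedged sketch, not part of the original test module.
    A minimal stand-alone illustration of the Variables container exercised
    by the tests above; the flag values are made up.  With the default
    StrList element class str() joins elements with spaces, so the result
    should render roughly as '-O2 -march=native'.
    """
    v = Variables()
    v.nappend('CFLAGS', ['-O2'])
    v.nappend('CFLAGS', ['-march=native'])
    return str(v['CFLAGS'])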
if __name__ == '__main__':
main()
|
valtandor/easybuild-framework
|
test/framework/variables.py
|
Python
|
gpl-2.0
| 3,243
|