repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
ajfriend/cvxpy
refs/heads/master
examples/extensions/kmeans.py
11
# NOTE(review): this is sklearn's "K-Means clustering on the digits dataset"
# example embedded as a cvxpy extension demo.  The cvx/mi imports are unused
# by the demo body but are kept (they may be import-time-significant for the
# surrounding package -- TODO confirm).
import cvxpy as cvx
import mixed_integer as mi

print(__doc__)

from time import time
import numpy as np
import matplotlib.pyplot as plt

from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale

# Fixed seed so KMeans inits (and therefore the printed metrics) are reproducible.
np.random.seed(42)

# Load the digits data and standardize each feature (zero mean, unit variance).
digits = load_digits()
data = scale(digits.data)

n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target

# Number of samples drawn when estimating the silhouette score (full-data
# silhouette is quadratic in n_samples).
sample_size = 300

print("n_digits: %d, \t n_samples %d, \t n_features %d" % (n_digits, n_samples, n_features))

print(79 * '_')
print('% 9s' % 'init' ' time inertia homo compl v-meas ARI AMI silhouette')


def bench_k_means(estimator, name, data):
    """Fit `estimator` on `data`, then print wall time, inertia and a row of
    clustering-quality metrics against the module-level ground truth `labels`.
    """
    t0 = time()
    estimator.fit(data)
    print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
          % (name, (time() - t0), estimator.inertia_,
             metrics.homogeneity_score(labels, estimator.labels_),
             metrics.completeness_score(labels, estimator.labels_),
             metrics.v_measure_score(labels, estimator.labels_),
             metrics.adjusted_rand_score(labels, estimator.labels_),
             metrics.adjusted_mutual_info_score(labels, estimator.labels_),
             metrics.silhouette_score(data, estimator.labels_,
                                      metric='euclidean',
                                      sample_size=sample_size)))


bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=data)

bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=data)

# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
              name="PCA-based", data=data)
print(79 * '_')

###############################################################################
# Visualize the results on PCA-reduced data

reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)

# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].

# Plot the decision boundary. For that, we will assign a color to each
# point of the mesh.
# BUG FIX: bounds were previously `min() + 1` / `max() - 1`, which SHRANK the
# window and cropped edge points off the plot; pad outward instead, matching
# the upstream sklearn example.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')

plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
mehanig/scrapi
refs/heads/develop
scrapi/harvesters/pubmedcentral.py
2
""" Harvester of PubMed Central for the SHARE notification service Example API call: http://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi?verb=ListRecords&metadataPrefix=oai_dc&from=2015-04-13&until=2015-04-14 """ from __future__ import unicode_literals from scrapi.base import helpers from scrapi.base import OAIHarvester def format_uris_pubmedcentral(*args): uris = helpers.oai_process_uris(*args) for arg in args: if arg and 'oai:pubmedcentral.nih.gov:' in arg[0]: PMC_ID = arg[0].replace('oai:pubmedcentral.nih.gov:', '') uris['canonicalUri'] = 'http://www.ncbi.nlm.nih.gov/pmc/articles/PMC' + PMC_ID return uris class PubMedCentralHarvester(OAIHarvester): short_name = 'pubmedcentral' long_name = 'PubMed Central' url = 'http://www.ncbi.nlm.nih.gov/pmc/' @property def schema(self): return helpers.updated_schema(self._schema, { "uris": ('//ns0:header/ns0:identifier/node()', '//dc:identifier/node()', format_uris_pubmedcentral) }) base_url = 'http://www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi' property_list = [ 'type', 'source', 'rights', 'format', 'setSpec', 'date', 'identifier' ]
ryannathans/micropython
refs/heads/master
tests/bench/arrayop-1-list_inplace.py
101
# Array operation
# Type: list, inplace operation using for. What's good about this
# method is that it doesn't require any extra memory allocation.
import bench


def test(num):
    # Scale the repeat count down so total work stays proportional to `num`.
    reps = num // 10000
    for _ in iter(range(reps)):
        # Fresh 1000-element list each repetition, bumped element by element.
        data = [0] * 1000
        size = len(data)
        for idx in range(size):
            data[idx] += 1


bench.run(test)
blademainer/intellij-community
refs/heads/master
python/helpers/docutils/readers/standalone.py
197
# $Id: standalone.py 4802 2006-11-12 18:02:17Z goodger $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. """ Standalone file Reader for the reStructuredText markup syntax. """ __docformat__ = 'reStructuredText' import sys from docutils import frontend, readers from docutils.transforms import frontmatter, references, misc class Reader(readers.Reader): supported = ('standalone',) """Contexts this reader supports.""" document = None """A single document tree.""" settings_spec = ( 'Standalone Reader', None, (('Disable the promotion of a lone top-level section title to ' 'document title (and subsequent section title to document ' 'subtitle promotion; enabled by default).', ['--no-doc-title'], {'dest': 'doctitle_xform', 'action': 'store_false', 'default': 1, 'validator': frontend.validate_boolean}), ('Disable the bibliographic field list transform (enabled by ' 'default).', ['--no-doc-info'], {'dest': 'docinfo_xform', 'action': 'store_false', 'default': 1, 'validator': frontend.validate_boolean}), ('Activate the promotion of lone subsection titles to ' 'section subtitles (disabled by default).', ['--section-subtitles'], {'dest': 'sectsubtitle_xform', 'action': 'store_true', 'default': 0, 'validator': frontend.validate_boolean}), ('Deactivate the promotion of lone subsection titles.', ['--no-section-subtitles'], {'dest': 'sectsubtitle_xform', 'action': 'store_false'}), )) config_section = 'standalone reader' config_section_dependencies = ('readers',) def get_transforms(self): return readers.Reader.get_transforms(self) + [ references.Substitutions, references.PropagateTargets, frontmatter.DocTitle, frontmatter.SectionSubTitle, frontmatter.DocInfo, references.AnonymousHyperlinks, references.IndirectHyperlinks, references.Footnotes, references.ExternalTargets, references.InternalTargets, references.DanglingReferences, misc.Transitions, ]
hellodata/hellodate
refs/heads/master
2/site-packages/django/contrib/gis/gdal/error.py
104
""" This module houses the OGR & SRS Exception objects, and the check_err() routine which checks the status code returned by OGR methods. """ #### OGR & SRS Exceptions #### class GDALException(Exception): pass class OGRException(Exception): pass class SRSException(Exception): pass class OGRIndexError(OGRException, KeyError): """ This exception is raised when an invalid index is encountered, and has the 'silent_variable_feature' attribute set to true. This ensures that django's templates proceed to use the next lookup type gracefully when an Exception is raised. Fixes ticket #4740. """ silent_variable_failure = True #### OGR error checking codes and routine #### # OGR Error Codes OGRERR_DICT = { 1: (OGRException, 'Not enough data.'), 2: (OGRException, 'Not enough memory.'), 3: (OGRException, 'Unsupported geometry type.'), 4: (OGRException, 'Unsupported operation.'), 5: (OGRException, 'Corrupt data.'), 6: (OGRException, 'OGR failure.'), 7: (SRSException, 'Unsupported SRS.'), 8: (OGRException, 'Invalid handle.'), } OGRERR_NONE = 0 def check_err(code): "Checks the given OGRERR, and raises an exception where appropriate." if code == OGRERR_NONE: return elif code in OGRERR_DICT: e, msg = OGRERR_DICT[code] raise e(msg) else: raise OGRException('Unknown error code: "%s"' % code)
katrid/django
refs/heads/master
tests/gis_tests/geogapp/tests.py
253
""" Tests for geography support in PostGIS """ from __future__ import unicode_literals import os from unittest import skipUnless from django.contrib.gis.db.models.functions import Area, Distance from django.contrib.gis.gdal import HAS_GDAL from django.contrib.gis.measure import D from django.test import TestCase, ignore_warnings, skipUnlessDBFeature from django.utils._os import upath from django.utils.deprecation import RemovedInDjango20Warning from ..utils import oracle, postgis from .models import City, County, Zipcode @skipUnlessDBFeature("gis_enabled") class GeographyTest(TestCase): fixtures = ['initial'] def test01_fixture_load(self): "Ensure geography features loaded properly." self.assertEqual(8, City.objects.count()) @skipUnlessDBFeature("supports_distances_lookups", "supports_distance_geodetic") def test02_distance_lookup(self): "Testing GeoQuerySet distance lookup support on non-point geography fields." z = Zipcode.objects.get(code='77002') cities1 = list(City.objects .filter(point__distance_lte=(z.poly, D(mi=500))) .order_by('name') .values_list('name', flat=True)) cities2 = list(City.objects .filter(point__dwithin=(z.poly, D(mi=500))) .order_by('name') .values_list('name', flat=True)) for cities in [cities1, cities2]: self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities) @skipUnlessDBFeature("has_distance_method", "supports_distance_geodetic") @ignore_warnings(category=RemovedInDjango20Warning) def test03_distance_method(self): "Testing GeoQuerySet.distance() support on non-point geography fields." # `GeoQuerySet.distance` is not allowed geometry fields. htown = City.objects.get(name='Houston') Zipcode.objects.distance(htown.point) @skipUnless(postgis, "This is a PostGIS-specific test") def test04_invalid_operators_functions(self): "Ensuring exceptions are raised for operators & functions invalid on geography fields." # Only a subset of the geometry functions & operator are available # to PostGIS geography types. 
For more information, visit: # http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions z = Zipcode.objects.get(code='77002') # ST_Within not available. self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count) # `@` operator not available. self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count) # Regression test for #14060, `~=` was never really implemented for PostGIS. htown = City.objects.get(name='Houston') self.assertRaises(ValueError, City.objects.get, point__exact=htown.point) @skipUnless(HAS_GDAL, "GDAL is required.") def test05_geography_layermapping(self): "Testing LayerMapping support on models with geography fields." # There is a similar test in `layermap` that uses the same data set, # but the County model here is a bit different. from django.contrib.gis.utils import LayerMapping # Getting the shapefile and mapping dictionary. shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data')) co_shp = os.path.join(shp_path, 'counties', 'counties.shp') co_mapping = {'name': 'Name', 'state': 'State', 'mpoly': 'MULTIPOLYGON', } # Reference county names, number of polygons, and state names. names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo'] num_polys = [1, 2, 1, 19, 1] # Number of polygons for each. st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado'] lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name') lm.save(silent=True, strict=True) for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names): self.assertEqual(4326, c.mpoly.srid) self.assertEqual(num_poly, len(c.mpoly)) self.assertEqual(name, c.name) self.assertEqual(state, c.state) @skipUnlessDBFeature("has_area_method", "supports_distance_geodetic") @ignore_warnings(category=RemovedInDjango20Warning) def test06_geography_area(self): "Testing that Area calculations work on geography columns." 
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002'; ref_area = 5439100.95415646 if oracle else 5439084.70637573 tol = 5 z = Zipcode.objects.area().get(code='77002') self.assertAlmostEqual(z.area.sq_m, ref_area, tol) @skipUnlessDBFeature("gis_enabled") class GeographyFunctionTests(TestCase): fixtures = ['initial'] @skipUnlessDBFeature("has_Distance_function", "supports_distance_geodetic") def test_distance_function(self): """ Testing Distance() support on non-point geography fields. """ ref_dists = [0, 4891.20, 8071.64, 9123.95] htown = City.objects.get(name='Houston') qs = Zipcode.objects.annotate(distance=Distance('poly', htown.point)) for z, ref in zip(qs, ref_dists): self.assertAlmostEqual(z.distance.m, ref, 2) @skipUnlessDBFeature("has_Area_function", "supports_distance_geodetic") def test_geography_area(self): """ Testing that Area calculations work on geography columns. """ # SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002'; ref_area = 5439100.95415646 if oracle else 5439084.70637573 tol = 5 z = Zipcode.objects.annotate(area=Area('poly')).get(code='77002') self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
frankiecjunle/yunblog
refs/heads/master
venv/lib/python2.7/site-packages/flask/sessions.py
119
# -*- coding: utf-8 -*-
"""
    flask.sessions
    ~~~~~~~~~~~~~~

    Implements cookie based sessions based on itsdangerous.

    :copyright: (c) 2015 by Armin Ronacher.
    :license: BSD, see LICENSE for more details.
"""

import uuid
import hashlib
from base64 import b64encode, b64decode
from datetime import datetime
from werkzeug.http import http_date, parse_date
from werkzeug.datastructures import CallbackDict
from . import Markup, json
from ._compat import iteritems, text_type
from .helpers import total_seconds

from itsdangerous import URLSafeTimedSerializer, BadSignature


class SessionMixin(object):
    """Expands a basic dictionary with an accessors that are expected
    by Flask extensions and users for the session.
    """

    def _get_permanent(self):
        return self.get('_permanent', False)

    def _set_permanent(self, value):
        self['_permanent'] = bool(value)

    #: this reflects the ``'_permanent'`` key in the dict.
    permanent = property(_get_permanent, _set_permanent)
    # Remove the raw accessors from the class namespace; only the property
    # remains public.
    del _get_permanent, _set_permanent

    #: some session backends can tell you if a session is new, but that is
    #: not necessarily guaranteed.  Use with caution.  The default mixin
    #: implementation just hardcodes ``False`` in.
    new = False

    #: for some backends this will always be ``True``, but some backends will
    #: default this to false and detect changes in the dictionary for as
    #: long as changes do not happen on mutable structures in the session.
    #: The default mixin implementation just hardcodes ``True`` in.
    modified = True


def _tag(value):
    """Recursively convert *value* into a JSON-serializable structure.

    Non-JSON types are wrapped in single-key dicts whose key starts with a
    space (' t' tuple, ' u' UUID, ' b' bytes, ' m' markup, ' d' datetime);
    ``TaggedJSONSerializer.loads`` reverses the mapping via ``LOADS_MAP``.
    """
    if isinstance(value, tuple):
        return {' t': [_tag(x) for x in value]}
    elif isinstance(value, uuid.UUID):
        return {' u': value.hex}
    elif isinstance(value, bytes):
        return {' b': b64encode(value).decode('ascii')}
    elif callable(getattr(value, '__html__', None)):
        # Anything exposing __html__ (e.g. Markup) is stored as its unicode
        # HTML representation.
        return {' m': text_type(value.__html__())}
    elif isinstance(value, list):
        return [_tag(x) for x in value]
    elif isinstance(value, datetime):
        return {' d': http_date(value)}
    elif isinstance(value, dict):
        return dict((k, _tag(v)) for k, v in iteritems(value))
    elif isinstance(value, str):
        # On Python 2 `str` is a byte string; only ASCII-decodable ones can
        # be stored, anything else is rejected with a descriptive error.
        try:
            return text_type(value)
        except UnicodeError:
            from flask.debughelpers import UnexpectedUnicodeError
            raise UnexpectedUnicodeError(u'A byte string with '
                u'non-ASCII data was passed to the session system '
                u'which can only store unicode strings. Consider '
                u'base64 encoding your string (String was %r)' % value)
    return value


class TaggedJSONSerializer(object):
    """A customized JSON serializer that supports a few extra types that
    we take for granted when serializing (tuples, markup objects, datetime).
    """

    def dumps(self, value):
        # Compact separators keep the cookie payload as small as possible.
        return json.dumps(_tag(value), separators=(',', ':'))

    # Inverse of the tags produced by _tag(): tag key -> reconstructor.
    LOADS_MAP = {
        ' t': tuple,
        ' u': uuid.UUID,
        ' b': b64decode,
        ' m': Markup,
        ' d': parse_date,
    }

    def loads(self, value):
        def object_hook(obj):
            # Only single-key dicts can be tagged wrappers.
            if len(obj) != 1:
                return obj
            the_key, the_value = next(iteritems(obj))
            # Check the key for a corresponding function
            return_function = self.LOADS_MAP.get(the_key)
            if return_function:
                # Pass the value to the function
                return return_function(the_value)
            # Didn't find a function for this object
            return obj
        return json.loads(value, object_hook=object_hook)


session_json_serializer = TaggedJSONSerializer()


class SecureCookieSession(CallbackDict, SessionMixin):
    """Base class for sessions based on signed cookies."""

    def __init__(self, initial=None):
        # CallbackDict invokes on_update (with the dict itself as argument)
        # on every mutation, flipping the `modified` flag.
        def on_update(self):
            self.modified = True
        CallbackDict.__init__(self, initial, on_update)
        self.modified = False


class NullSession(SecureCookieSession):
    """Class used to generate nicer error messages if sessions are not
    available.  Will still allow read-only access to the empty session
    but fail on setting.
    """

    def _fail(self, *args, **kwargs):
        raise RuntimeError('The session is unavailable because no secret '
                           'key was set. Set the secret_key on the '
                           'application to something unique and secret.')
    # Every mutating dict method fails loudly; reads still work.
    __setitem__ = __delitem__ = clear = pop = popitem = \
        update = setdefault = _fail
    del _fail


class SessionInterface(object):
    """The basic interface you have to implement in order to replace the
    default session interface which uses werkzeug's securecookie
    implementation.  The only methods you have to implement are
    :meth:`open_session` and :meth:`save_session`, the others have
    useful defaults which you don't need to change.

    The session object returned by the :meth:`open_session` method has to
    provide a dictionary like interface plus the properties and methods
    from the :class:`SessionMixin`.  We recommend just subclassing a dict
    and adding that mixin::

        class Session(dict, SessionMixin):
            pass

    If :meth:`open_session` returns ``None`` Flask will call into
    :meth:`make_null_session` to create a session that acts as replacement
    if the session support cannot work because some requirement is not
    fulfilled.  The default :class:`NullSession` class that is created
    will complain that the secret key was not set.

    To replace the session interface on an application all you have to do
    is to assign :attr:`flask.Flask.session_interface`::

        app = Flask(__name__)
        app.session_interface = MySessionInterface()

    .. versionadded:: 0.8
    """

    #: :meth:`make_null_session` will look here for the class that should
    #: be created when a null session is requested.  Likewise the
    #: :meth:`is_null_session` method will perform a typecheck against
    #: this type.
    null_session_class = NullSession

    #: A flag that indicates if the session interface is pickle based.
    #: This can be used by Flask extensions to make a decision in regards
    #: to how to deal with the session object.
    #:
    #: .. versionadded:: 0.10
    pickle_based = False

    def make_null_session(self, app):
        """Creates a null session which acts as a replacement object if the
        real session support could not be loaded due to a configuration
        error.  This mainly aids the user experience because the job of the
        null session is to still support lookup without complaining but
        modifications are answered with a helpful error message of what
        failed.

        This creates an instance of :attr:`null_session_class` by default.
        """
        return self.null_session_class()

    def is_null_session(self, obj):
        """Checks if a given object is a null session.  Null sessions are
        not asked to be saved.

        This checks if the object is an instance of :attr:`null_session_class`
        by default.
        """
        return isinstance(obj, self.null_session_class)

    def get_cookie_domain(self, app):
        """Helpful helper method that returns the cookie domain that should
        be used for the session cookie if session cookies are used.
        """
        if app.config['SESSION_COOKIE_DOMAIN'] is not None:
            return app.config['SESSION_COOKIE_DOMAIN']
        if app.config['SERVER_NAME'] is not None:
            # chop off the port which is usually not supported by browsers
            rv = '.' + app.config['SERVER_NAME'].rsplit(':', 1)[0]

            # Google chrome does not like cookies set to .localhost, so
            # we just go with no domain then.  Flask documents anyways that
            # cross domain cookies need a fully qualified domain name
            if rv == '.localhost':
                rv = None

            # If we infer the cookie domain from the server name we need
            # to check if we are in a subpath.  In that case we can't
            # set a cross domain cookie.
            if rv is not None:
                path = self.get_cookie_path(app)
                if path != '/':
                    rv = rv.lstrip('.')

            return rv

    def get_cookie_path(self, app):
        """Returns the path for which the cookie should be valid.  The
        default implementation uses the value from the ``SESSION_COOKIE_PATH``
        config var if it's set, and falls back to ``APPLICATION_ROOT`` or
        uses ``/`` if it's ``None``.
        """
        return app.config['SESSION_COOKIE_PATH'] or \
               app.config['APPLICATION_ROOT'] or '/'

    def get_cookie_httponly(self, app):
        """Returns True if the session cookie should be httponly.  This
        currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
        config var.
        """
        return app.config['SESSION_COOKIE_HTTPONLY']

    def get_cookie_secure(self, app):
        """Returns True if the cookie should be secure.  This currently
        just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
        """
        return app.config['SESSION_COOKIE_SECURE']

    def get_expiration_time(self, app, session):
        """A helper method that returns an expiration date for the session
        or ``None`` if the session is linked to the browser session.  The
        default implementation returns now + the permanent session
        lifetime configured on the application.
        """
        # Implicitly returns None for non-permanent sessions.
        if session.permanent:
            return datetime.utcnow() + app.permanent_session_lifetime

    def should_set_cookie(self, app, session):
        """Indicates whether a cookie should be set now or not.  This is
        used by session backends to figure out if they should emit a
        set-cookie header or not.  The default behavior is controlled by
        the ``SESSION_REFRESH_EACH_REQUEST`` config variable.  If
        it's set to ``False`` then a cookie is only set if the session is
        modified, if set to ``True`` it's always set if the session is
        permanent.

        This check is usually skipped if sessions get deleted.

        .. versionadded:: 0.11
        """
        if session.modified:
            return True
        save_each = app.config['SESSION_REFRESH_EACH_REQUEST']
        return save_each and session.permanent

    def open_session(self, app, request):
        """This method has to be implemented and must either return ``None``
        in case the loading failed because of a configuration error or an
        instance of a session object which implements a dictionary like
        interface + the methods and attributes on :class:`SessionMixin`.
        """
        raise NotImplementedError()

    def save_session(self, app, session, response):
        """This is called for actual sessions returned by :meth:`open_session`
        at the end of the request.  This is still called during a request
        context so if you absolutely need access to the request you can do
        that.
        """
        raise NotImplementedError()


class SecureCookieSessionInterface(SessionInterface):
    """The default session interface that stores sessions in signed cookies
    through the :mod:`itsdangerous` module.
    """

    #: the salt that should be applied on top of the secret key for the
    #: signing of cookie based sessions.
    salt = 'cookie-session'

    #: the hash function to use for the signature.  The default is sha1
    digest_method = staticmethod(hashlib.sha1)

    #: the name of the itsdangerous supported key derivation.  The default
    #: is hmac.
    key_derivation = 'hmac'

    #: A python serializer for the payload.  The default is a compact
    #: JSON derived serializer with support for some extra Python types
    #: such as datetime objects or tuples.
    serializer = session_json_serializer
    session_class = SecureCookieSession

    def get_signing_serializer(self, app):
        # Without a secret key no signed cookie can be produced or verified.
        if not app.secret_key:
            return None
        signer_kwargs = dict(
            key_derivation=self.key_derivation,
            digest_method=self.digest_method
        )
        return URLSafeTimedSerializer(app.secret_key, salt=self.salt,
                                      serializer=self.serializer,
                                      signer_kwargs=signer_kwargs)

    def open_session(self, app, request):
        s = self.get_signing_serializer(app)
        if s is None:
            return None
        val = request.cookies.get(app.session_cookie_name)
        if not val:
            return self.session_class()
        # Cookies older than the permanent session lifetime are rejected
        # by the timed serializer via max_age.
        max_age = total_seconds(app.permanent_session_lifetime)
        try:
            data = s.loads(val, max_age=max_age)
            return self.session_class(data)
        except BadSignature:
            # Tampered or expired cookie: fall back to a fresh session.
            return self.session_class()

    def save_session(self, app, session, response):
        domain = self.get_cookie_domain(app)
        path = self.get_cookie_path(app)

        # Delete case.  If there is no session we bail early.
        # If the session was modified to be empty we remove the
        # whole cookie.
        if not session:
            if session.modified:
                response.delete_cookie(app.session_cookie_name,
                                       domain=domain, path=path)
            return

        # Modification case.  There are upsides and downsides to
        # emitting a set-cookie header each request.  The behavior
        # is controlled by the :meth:`should_set_cookie` method
        # which performs a quick check to figure out if the cookie
        # should be set or not.  This is controlled by the
        # SESSION_REFRESH_EACH_REQUEST config flag as well as
        # the permanent flag on the session itself.
        if not self.should_set_cookie(app, session):
            return

        httponly = self.get_cookie_httponly(app)
        secure = self.get_cookie_secure(app)
        expires = self.get_expiration_time(app, session)
        val = self.get_signing_serializer(app).dumps(dict(session))
        response.set_cookie(app.session_cookie_name, val,
                            expires=expires, httponly=httponly,
                            domain=domain, path=path, secure=secure)
TU-Berlin-DIMA/myriad-toolkit
refs/heads/master
src/python/myriad/dgen.py
2
''' Copyright 2010-2013 DIMA Research Group, TU Berlin Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Created on Mar 2, 2011 @author: Alexander Alexandrov <alexander.alexandrov@tu-berlin.de> ''' import os, sys, tempfile, optparse, datetime, time import BaseHTTPServer import config from urlparse import urlparse from SocketServer import ThreadingMixIn from threading import Thread, Lock, RLock from myriad.util import sysutil, timeutil import httplib class DGen(object): ''' classdocs ''' basePath = None dgenName = None nodeName = None config = None dgenConfig = None parser = None logBase = None cleanup = None sf = None configPath = None nodeConfig = None datasetID = None executeStages = None configName = None serverAddress = None dgenMaster = None dgenNodes = None started = None finished = None log = None VERSION = "0.3.0" def __init__(self, basePath, dgenName, argv): ''' Constructor ''' self.basePath = basePath self.dgenName = dgenName self.initialize(argv) self.dgenNodes = [] def initialize(self, argv): parser = optparse.OptionParser(usage="%prog [options] <dgen-config>", version=self.VERSION) parser.add_option("-s", dest="sf", type="float", default=1.0, help="scaling factor (sf=1 generates 1GB data)") parser.add_option("-m", dest="dataset_id", type="str", default="default-dataset", help="ID of the generated Myriad dataset") parser.add_option("-x", dest="execute_stages", action="append", type="str", default=[], help="Specify a specific stage to execute") parser.add_option("-n", dest="node_config", 
type="str", default="%s-node.xml" % (self.dgenName), help="name of the node config file (should reside in the config dir)") parser.add_option("--config-dir", dest="config_path", type="str", default="%s/config" % (self.basePath), help="path to the myriad config folder (TODO)") parser.add_option("--log-dir", dest="log_dir", type="str", default=None, help="base directory for output logging") parser.add_option("--cleanup", dest="cleanup", action="store_true", help="remove output from previously generated job") self.parser = parser args, remainder = parser.parse_args(argv) if (len(remainder) != 1): self.error(None, True) raise self.log = sysutil.createLogger("myriad.dgen") try: self.cleanup = args.cleanup self.sf = args.sf self.datasetID = args.dataset_id self.configPath = args.config_path self.nodeConfig = args.node_config self.logBase = args.log_dir self.executeStages = args.execute_stages self.configName = remainder.pop() # load myriad config self.config = config.readConfig(self.dgenName, self.nodeConfig, "%s/%s-frontend.xml" % (self.configPath, self.dgenName)) # load sizing config self.dgenConfig = config.readDGenConfig("%s/%s-node.properties" % (self.configPath, self.dgenName)) DGenNode.MAX_ATTEMPTS = int(self.dgenConfig.getProperty("coordinator.node.max.attempts", DGenNode.MAX_ATTEMPTS)) DGenNode.DEAD_TIMEOUT = datetime.timedelta(0, 0, 0, int(self.dgenConfig.getProperty("coordinator.node.dead.timeout", DGenNode.DEAD_TIMEOUT.seconds*1000))) NodeMonitor.POLL_INTERVAL = int(self.dgenConfig.getProperty("coordinator.node.monitor.interval", NodeMonitor.POLL_INTERVAL*1000))/1000.0 if (self.logBase == None): # create log dir self.logBase = tempfile.mkdtemp("", "%s-frontend-%s_" % (self.dgenName, self.datasetID)) # make sure that logBase directories exist sysutil.checkDir(self.logBase) # register file handler to the logger sysutil.registerFileHandler(self.log, "%s/%s-frontend.log" % (self.logBase, self.dgenName)) except: e = sys.exc_info()[1] self.error("unexpected error: 
%s" % (str(e)), True) raise def run(self): ''' Srart the distributed generation process using the specified dgen configName. ''' self.started = datetime.datetime.now() server = None monitor = None try: if (self.cleanup): slaves = self.config.slaves(self.configName) self.log.info("~" * 55) self.log.info("Myriad Parallel Data Generator (Version %s)", self.VERSION) self.log.info("~" * 55) self.log.info("cleaning configuration `%s`", self.configName) for h in slaves: DGenHost(h).clean(self) else: master = self.config.master(self.configName) nodes = self.config.nodes(self.configName) self.log.info("~" * 55) self.log.info("Myriad Parallel Data Generator (Version %s)", self.VERSION) self.log.info("~" * 55) self.log.info("running configuration `%s` with scaling factor %.3f", self.configName, self.sf) self.dgenMaster = master self.dgenNodes = [ DGenNode(n) for n in nodes ] self.log.info("starting heartbeat server on address `%s:%d`", self.dgenMaster.name, self.dgenMaster.coorServerPort) server = HeartbeatServer(self.datasetID, self.dgenNodes, ('0.0.0.0', self.dgenMaster.coorServerPort)) # start node monitor self.log.info("starting node monitor thread") monitor = NodeMonitor(self, server) monitor.start() # start server loop serverThread = Thread(target=server.serveLoop) serverThread.start() self.log.info("starting %d generator nodes", len(self.dgenNodes)) self.startNodes() # wait for server thread to finish (timeout and loop needed for KeyboardInterrupt) while(serverThread.isAlive()): serverThread.join(3.0) # wait for monitor thread monitor.join() if (monitor.exception): self.log.error("interrupting generation process after failure in node %d ", monitor.exception.id) raise monitor.exception # abort all running nodes self.abortAllNodes() self.finished = datetime.datetime.now() self.log.info("generator process finished in %s seconds", timeutil.formatTime(self.finished - self.started)) except KeyboardInterrupt: self.log.warning("execution interrupted by user") if (monitor != 
None): monitor.shutdown() self.abortAllNodes() raise except NodeFailureException, e: self.abortAllNodes() if (monitor != None): monitor.shutdown() self.error(str(e), False) raise except config.UnknownConfigObjectException, e: self.abortAllNodes() self.error(str(e), False) raise except: e = sys.exc_info()[1] if (monitor != None): monitor.shutdown() self.error(str(e), False) raise self.abortAllNodes() def startNodes(self): for node in self.dgenNodes: node.start(self, len(self.dgenNodes)) def abortAllNodes(self): for node in self.dgenNodes: node.abort(self, len(self.dgenNodes)) def error(self, message=None, withUsage = False): if (withUsage): self.parser.print_usage(sys.stderr) if (message != None): self.log.error("%s: error: %s", self.parser.get_prog_name(), message) class NodeMonitor(Thread): POLL_INTERVAL = 5.0 dgen = None server = None exception = None isShutdown = False log = None def __init__(self, dgen, server): Thread.__init__(self) self.dgen = dgen self.server = server self.isShutdown = False self.log = sysutil.getExistingLogger("myriad.dgen") def run(self): while (not self.isShutdown): time.sleep(3.0) self.server.nonReadyLock.acquire() try: if (self.server.nonReady == 0): self.isShutdown = True for node in self.server.nodes: if (node.isDead()): self.log.warning("restarting dead node #%d", node.id) node.restart(self.dgen, len(self.server.nodes)) except NodeFailureException, e: self.isShutdown = True self.exception = e self.server.nonReadyLock.release() self.server.stopServeLoop() def shutdown(self): self.isShutdown = True class HeartbeatServer(ThreadingMixIn, BaseHTTPServer.HTTPServer): datasetID = None nodes = [] nonReady = None nonReadyLock = None isShutdown = False def __init__(self, datasetID, nodes, address): BaseHTTPServer.HTTPServer.__init__(self, address, RequestHandler) self.datasetID = datasetID self.nodes = nodes self.nonReady = len(nodes) self.nonReadyLock = Lock() self.isShutdown = False def serveLoop(self): while (not self.isShutdown): 
self.handle_request() def stopServeLoop(self): self.isShutdown = True self.makeSentinelRequest() def makeSentinelRequest(self): try: conn = httplib.HTTPConnection(self.server_address[0], self.server_address[1]) conn.request("GET", "/sentinel") conn.getresponse() conn.close() except: pass class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): GET_REFRESH = 8000 def do_HEAD(self): self.send_response(200) self.end_headers() result = urlparse(self.path, '', False) params = dict([part.split('=') for part in result[4].split('&')]) status = int(params['status']) if (status >= DGenNode.INITIALIZING and status <= DGenNode.ABORTED): # valid values for heartbeats node = self.server.nodes[int(params['id'])] node.lock.acquire() if (status == DGenNode.READY): self.server.nonReadyLock.acquire() node.lastBeat = datetime.datetime.now() if (node.status != status and node.status < status): log = sysutil.getExistingLogger("myriad.dgen") log.info("node %05d: %s -> %s", node.id, DGenNode.STATUS_STRING[node.status], DGenNode.STATUS_STRING[status]) node.status = status if (node.status == DGenNode.ACTIVE): node.progress = float(params['progress']) elif (node.status == DGenNode.READY): node.progress = 1.0 node.finished = datetime.datetime.now() self.server.nonReady -= 1 elif (node.status == DGenNode.ABORTED or node.status == DGenNode.FAILED): pass else: log = sysutil.getExistingLogger("myriad.dgen") log.error("unknown status %d for node %d", node.status, node.id) if (status == DGenNode.READY): self.server.nonReadyLock.release() node.lock.release() def do_GET(self): if (self.path == '/sentinel'): self.send_response(200) self.end_headers() return elif (self.path != '/'): self.send_response(404) self.end_headers() return self.send_response(200) self.send_header('Content-type', 'text/html') self.end_headers() print >> self.wfile, "<html>" print >> self.wfile, "<head>" print >> self.wfile, " <title>Myriad Parallel Data Generator &raquo; % s</title>" % (self.server.datasetID) print >> 
self.wfile, " <script type='text/JavaScript'>" print >> self.wfile, " <!--" print >> self.wfile, " function timedRefresh(timeoutPeriod) {" print >> self.wfile, " setTimeout('location.reload(true);', timeoutPeriod);" print >> self.wfile, " }" print >> self.wfile, " // -->" print >> self.wfile, " </script>" print >> self.wfile, "</head>" print >> self.wfile, "<body style='margin: 0; padding: 2ex 2em; font-size: 14px;' onload='javascript:timedRefresh(%d);'>" % (self.GET_REFRESH) print >> self.wfile, "<div id='header' style='text-align: center;'>" print >> self.wfile, " <h1 style='color: #333; font-size: 2em; margin: 0 0 0.5ex 0; padding: 0;'>Myriad Parallel Data Generator</h1>" print >> self.wfile, " <h2 style='color: #333; font-size: 1.5em; margin: 0 0 3ex 0; padding: 0;'>Job coordinator for dataset &raquo;%s&laquo; </h2>" % (self.server.datasetID) print >> self.wfile, "</div>" print >> self.wfile, "<table style='width: 100%; border: 1px solid #999;' cellspacing='5' cellpadding='0'>" print >> self.wfile, "<thead>" print >> self.wfile, "<tr>" print >> self.wfile, " <td style='width: 10%; background: #454545; color: #fafafa; padding: 0.5ex'>Node #</td>" print >> self.wfile, " <td style='width: 20%; background: #454545; color: #fafafa; padding: 0.5ex'>Hostname</td>" print >> self.wfile, " <td style='width: 35%; background: #454545; color: #fafafa; padding: 0.5ex'>Progress</td>" print >> self.wfile, " <td style='width: 15%; background: #454545; color: #fafafa; padding: 0.5ex'>Status</td>" print >> self.wfile, " <td style='width: 10%; background: #454545; color: #fafafa; padding: 0.5ex'>Attempt #</td>" print >> self.wfile, " <td style='width: 10%; background: #454545; color: #fafafa; padding: 0.5ex'>Time</td>" print >> self.wfile, "</tr>" print >> self.wfile, "</thead>" print >> self.wfile, "<tbody>" for n in self.server.nodes: print >> self.wfile, "<tr>" print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'>%05d</td>" % (n.id) print >> 
self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'>%s</td>" % (n.host) print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'><span style='float: left; width: 15%%;'>%02d%%</span><span style='float: left; width: %d%%; border-left: 1px solid #666; background: #666; color: #666; overflow: hidden;'>&raquo;</span></td>" % (100 * n.progress, 80 * n.progress) print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'>%s</td>" % (DGenNode.STATUS_STRING[n.status]) print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'>%d</td>" % (n.attempt) print >> self.wfile, " <td style='background: #fafafa; color: #454545; padding: 0.5ex'>%s</td>" % (timeutil.formatTime(n.lastBeat - n.started)) print >> self.wfile, "</tr>" print >> self.wfile, "</tbody>" print >> self.wfile, "</table>" print >> self.wfile, "<body>" print >> self.wfile, "</html>" def log_request(self, code='-', size='-'): ''' Disable request logging for the communication server ''' pass class DGenHost(config.Host): ''' classdocs ''' name = None outputBase = None def __init__(self, envNode): ''' Constructor ''' self.name = envNode.name self.outputBase = envNode.outputBase def clean(self, dgen): log = sysutil.getExistingLogger("myriad.dgen") log.info("clearing host `%s`", self.name) os.system("ssh %s 'rm -Rf %s/%s'" % (self.name, self.outputBase, dgen.datasetID)) class DGenNode(config.Node): ''' classdocs ''' id = None host = None dgenPath = None dgenName = None outputBase = None lock = None attempt = None progress = None status = None started = None finished = None lastBeat = None NEW = -1 INITIALIZING = 0 ACTIVE = 1 READY = 2 ABORTED = 3 FAILED = 4 DEAD_TIMEOUT = datetime.timedelta(0, 30) MAX_ATTEMPTS = 3 STATUS_STRING = { -1: "NEW", 0: "INITIALIZING", 1: "ACTIVE", 2: "READY", 3: "ABORTED", 4: "FAILED", } def __init__(self, envNode): ''' Constructor ''' self.id = envNode.id self.host = envNode.host 
self.dgenPath = envNode.dgenPath self.dgenName = envNode.dgenName self.outputBase = envNode.outputBase self.nodeConfig = envNode.nodeConfig self.attempt = 0 self.resetState() self.lock = RLock() def start(self, dgen, nodesTotal): self.lock.acquire(); os.system("ssh -f %s '%s/bin/%s-node -s%.3f -m%s -i%d -N%d -H%s -P%d -o%s -n%s %s > /dev/null 2> /dev/null &'" % (self.host, self.dgenPath, self.dgenName, dgen.sf, dgen.datasetID, self.id, nodesTotal, dgen.dgenMaster.name, dgen.dgenMaster.coorServerPort, self.outputBase, self.nodeConfig, ' '.join(map(lambda s: '-x%s' % s, dgen.executeStages)))) self.attempt += 1 self.resetState() self.lock.release() def restart(self, dgen, nodesTotal): self.lock.acquire(); if (self.attempt < DGenNode.MAX_ATTEMPTS): os.system("ssh -f %s '%s/bin/%s-kill %d %s > /dev/null 2> /dev/null'" % (self.host, self.dgenPath, self.dgenName, self.id, dgen.datasetID)) os.system("ssh -f %s '%s/bin/%s-node -s%.3f -m%s -i%d -N%d -H%s -P%d -o%s -n%s %s > /dev/null 2> /dev/null &'" % (self.host, self.dgenPath, self.dgenName, dgen.sf, dgen.datasetID, self.id, nodesTotal, dgen.dgenMaster.name, dgen.dgenMaster.coorServerPort, self.outputBase, self.nodeConfig, ' '.join(map(lambda s: '-x%s' % s, dgen.executeStages)))) self.attempt += 1 self.resetState() self.lock.release() else: self.status = DGenNode.FAILED self.lock.release() raise NodeFailureException(self.id) def abort(self, dgen, nodesTotal): self.lock.acquire() if (self.status < DGenNode.READY): log = sysutil.getExistingLogger("myriad.dgen") log.info("aborting node #%03d" % (self.id)) os.system("ssh -f %s '%s/bin/%s-kill %d %s > /dev/null 2> /dev/null'" % (self.host, self.dgenPath, self.dgenName, self.id, dgen.datasetID)) self.status = DGenNode.FAILED self.lock.release() def isDead(self): self.lock.acquire() if (self.status == DGenNode.FAILED): self.lock.release() raise NodeFailureException(self.id) if (self.status == DGenNode.READY): self.lock.release() return False elif (self.status == 
DGenNode.ABORTED): self.lock.release() return True else: diff = datetime.datetime.now() - self.lastBeat self.lock.release() return diff > DGenNode.DEAD_TIMEOUT def resetState(self): self.progress = 0.0 self.status = DGenNode.NEW self.started = datetime.datetime.now() self.lastBeat = datetime.datetime.now() class NodeFailureException(RuntimeError): id = None def __init__(self, id): self.id = id def __str__(self): return "node %d failed" % (self.id)
ox-it/django-conneg
refs/heads/master
django_conneg/tests/priorities.py
1
import itertools import unittest from django.http import HttpResponse from django_conneg import http, views, decorators class PriorityTestCase(unittest.TestCase): mimetypes = ('text/plain', 'application/xml', 'text/html', 'application/json') def getRenderer(self, format, mimetypes, name, priority): if not isinstance(mimetypes, tuple): mimetypes = (mimetypes,) def renderer(request, context, template_name): return HttpResponse('', mimetype=mimetypes[0]) renderer.__name__ = 'render_%s' % mimetypes[0].replace('/', '_') renderer = decorators.renderer(format=format, mimetypes=mimetypes, priority=priority)(renderer) return renderer def getTestView(self, priorities): members = {} for i, (mimetype, priority) in enumerate(priorities.items()): members['render_%d' % i] = self.getRenderer(str(i), mimetype, str(i), priority) TestView = type('TestView', (views.ContentNegotiatedView,), members) return TestView def testEqualQuality(self): accept_header = ', '.join(self.mimetypes) accept = http.MediaType.parse_accept_header(accept_header) for mimetypes in itertools.permutations(self.mimetypes): renderers = tuple(self.getRenderer(str(i), mimetype, str(i), -i) for i, mimetype in enumerate(mimetypes)) renderers = http.MediaType.resolve(accept, renderers) for renderer, mimetype in zip(renderers, mimetypes): self.assertEqual(next(iter(renderer.mimetypes)), http.MediaType(mimetype)) def testEqualQualityView(self): accept_header = ', '.join(self.mimetypes) accept = http.MediaType.parse_accept_header(accept_header) for mimetypes in itertools.permutations(self.mimetypes): priorities = dict((mimetype, -i) for i, mimetype in enumerate(mimetypes)) test_view = self.getTestView(priorities).as_view() renderers = http.MediaType.resolve(accept, test_view.conneg.renderers) for renderer, mimetype in zip(renderers, mimetypes): self.assertEqual(next(iter(renderer.mimetypes)), http.MediaType(mimetype)) def testPrioritySorting(self): for mimetypes in itertools.permutations(self.mimetypes): priorities = 
dict((mimetype, -i) for i, mimetype in enumerate(mimetypes)) test_view = self.getTestView(priorities).as_view() renderer_priorities = [renderer.priority for renderer in test_view.conneg.renderers] self.assertEqual(renderer_priorities, sorted(renderer_priorities, reverse=True)) if __name__ == '__main__': unittest.main()
jason-weirather/IDP
refs/heads/master
bin/parseRef.py
2
#!/usr/bin/python from operator import itemgetter, attrgetter from datetime import * import sys """ Note: """ ### Compute number of bases to junction gap from an exon index (exclusive) ### exon_list format: [start+point, end_point, len] ########## def comp_len_to_junc_gap_forward(start_index, exon_list, isoform_exons, isoform_points_dict): if (start_index >= len(exon_list)): print 'Warning: Called comp_len_to_junc_gap_forward with start_idx of out of bound' return 0 index = start_index l = 0 next_index = index +1 while (next_index< len(exon_list)): region_name_temp = 'P' + str(isoform_points_dict[exon_list[next_index][0]]) + ':P' + str(isoform_points_dict[exon_list[next_index][1]]) if (region_name_temp in isoform_exons): # this exon is in the isoform if ((exon_list[next_index][0] - 1) == exon_list[index][0]): l += exon_list[next_index][2] index = next_index else: break next_index += 1 return l ########## def comp_len_to_junc_gap_backward(start_index, exon_list, isoform_exons, isoform_points_dict): if (start_index >= len(exon_list)): print 'Warning: Called comp_len_to_junc_gap_backward with start_idx of out of bound' return 0 index = start_index l = 0 next_index = index -1 while (next_index >= 0): region_name_temp = 'P' + str(isoform_points_dict[exon_list[next_index][0]]) + ':P' + str(isoform_points_dict[exon_list[next_index][1]]) if (region_name_temp in isoform_exons): # this exon is in the isoform if ((exon_list[next_index][1] + 1) == exon_list[index][0]): l += exon_list[next_index][2] index = next_index else: break next_index -= 1 return l ########## ### ########## def compute_min_ljust(str_value): return (len(str_value)/10 + 1) * 10 ### Generate gene exons ### Note: RefSeq has range (start_point, end_point] ### Note: RefSeq exon file has range [start_point, end_point] ########## def generate_gene_exons(ref_file_str, exon_file_str): file_read = open(ref_file_str, 'r') file_write = open(exon_file_str, 'w') # Keep exon regions for each genome gene_dict = dict() for 
line in file_read: fields = line.split() rname = fields[2] gname = fields[0] num_exons = int(fields[8]) start_pos = fields[9].split(',')[:-1] # The sequence ends in , end_pos = fields[10].split(',')[:-1] exon_list = [] for i in range(num_exons): exon_list.append([int(start_pos[i]) + 1, int(end_pos[i])]) if (not gene_dict.has_key(rname)): gene_dict[rname] = dict() if gene_dict[rname].has_key(gname): exon_list += gene_dict[rname][gname] # print gname + ' repeated.' gene_dict[rname][gname] = exon_list # Save the list of exons for each gene for chr in gene_dict.keys(): for gname in gene_dict[chr].keys(): exon_sorted = sorted(gene_dict[chr][gname], key=itemgetter(0, 1)) # Sort by start position then end position i = 1 while (i < len(exon_sorted)): if ((exon_sorted[i][0] == exon_sorted[i - 1][0]) and (exon_sorted[i][1] == exon_sorted[i - 1][1])): del exon_sorted[i] # Delete the repeated exon elif (exon_sorted[i][0] <= exon_sorted[i - 1][1]): temp_exons = sorted([exon_sorted[i][0], exon_sorted[i - 1][0], exon_sorted[i][1], exon_sorted[i - 1][1]]) del exon_sorted[i - 1] # Delete the two overlapping exons del exon_sorted[i - 1] if (temp_exons[0] == temp_exons[1]): # Based on two exons overlap type, re-generate 2 or 3 new ones exon_sorted.insert(i - 1, [temp_exons[1], temp_exons[2]]) exon_sorted.insert(i , [temp_exons[2] + 1, temp_exons[3]]) elif (temp_exons[2] == temp_exons[3]): exon_sorted.insert(i - 1, [temp_exons[0], temp_exons[1] - 1]) exon_sorted.insert(i , [temp_exons[1], temp_exons[2]]) else: exon_sorted.insert(i - 1, [temp_exons[0], temp_exons[1] - 1]) exon_sorted.insert(i, [temp_exons[1], temp_exons[2]]) exon_sorted.insert(i + 1, [temp_exons[2] + 1, temp_exons[3]]) exon_sorted = sorted(exon_sorted, key=itemgetter(0, 1)) # re-sort the exon positions else: i += 1 # After sorting re-evaluate the same index unless there is no change gene_dict[chr][gname] = exon_sorted keys_sorted = sorted(gene_dict.keys()) # Print out the exon positions sorted in chr name and then gene 
start position for chr in keys_sorted: gene_list = [] for gname in gene_dict[chr].keys(): exon_sorted = sorted(gene_dict[chr][gname], key=itemgetter(0, 1)) # Sort by start position (There should not be any overlap between exons) gene_dict[chr][gname] = exon_sorted gene_list.append([gname, gene_dict[chr][gname][0][0]]) keys = sorted(gene_list, key=itemgetter(1)) for gname in keys: exon_sorted = sorted(gene_dict[chr][gname[0]], key=itemgetter(0, 1)) # Sort by start position (There should not be any overlap between exons) gene_dict[chr][gname[0]] = exon_sorted if (len(gname[0]) < 20): file_write.write(gname[0].ljust(20)) else: file_write.write(gname[0].ljust((len(gname[0]) / 10 + 1) * 10)) file_write.write(str(len(exon_sorted)).ljust(20) + chr.ljust(20) + '\n') for i in range(len(exon_sorted)): file_write.write(str(exon_sorted[i][0]).ljust(20) + str(exon_sorted[i][1]).ljust(20) + (str(exon_sorted[i][1] - exon_sorted[i][0] + 1).ljust(20))) file_write.write('\n') file_read.close() file_write.close() # ######### ### ################### def sanity_check_isoform_regions_length(gene_exons_dict, gene_regions_dict, gene_isoforms_dict, isoforms_regions_len_dict, genes_regions_len_dict): # Sanity check the isoforms regions length for i in gene_exons_dict.keys(): for j in gene_exons_dict[i].keys(): for k in gene_regions_dict[i][j]: if not isoforms_regions_len_dict[i][j].has_key(k): continue region_len = 0 for m in gene_isoforms_dict[i][j]: if not isoforms_regions_len_dict[i][j][k].has_key(m): continue if (region_len == 0): if (isoforms_regions_len_dict[i][j][k][m] > 0): genes_regions_len_dict[i][j][k] = isoforms_regions_len_dict[i][j][k][m] region_len = genes_regions_len_dict[i][j][k] elif (isoforms_regions_len_dict[i][j][k][m] > 0): if (genes_regions_len_dict[i][j][k] != isoforms_regions_len_dict[i][j][k][m]): print 'Isoforms do not match in region length, gene: %s, chr %s' % (j, i) exit(1) ####################### ### ############# def compute_isoform_length(start_pos, 
end_pos): length = 0; for i in range(len(start_pos)): length += (end_pos[i] - start_pos[i]) # In refSeq format, end point is not included return length ########## # ## Generate gene isoforms # ######### def generate_gene_regions(ref_file_str, exon_file_str, READ_LEN, READ_JUNC_MIN_MAP_LEN): file_read = open(exon_file_str, 'r') # Keep the list of exons for each gene gene_exons_dict = dict() gene_points_dict = dict() gene_regions_dict = dict() isoforms_regions_len_dict = dict() genes_regions_len_dict = dict() num_exons = 0 for line in file_read: if (num_exons == 0): fields = line.split() gname = fields[0] rname = fields[2] num_exons = int(fields[1]) exon_list = [] points_dict = dict() point_index = 0 if (not gene_regions_dict.has_key(rname)): gene_regions_dict[rname] = dict() isoforms_regions_len_dict[rname] = dict() genes_regions_len_dict[rname] = dict() gene_regions_dict[rname][gname] = dict() isoforms_regions_len_dict[rname][gname] = dict() genes_regions_len_dict[rname][gname] = dict() else: fields = line.split() exon_list.append([int(fields[0]), int(fields[1]), int(fields[2])]) points_dict[int(fields[0])] = point_index point_index += 1 points_dict[int(fields[1])] = point_index point_index += 1 if (not gene_exons_dict.has_key(rname)): gene_exons_dict[rname] = dict() gene_points_dict[rname] = dict() gene_exons_dict[rname][gname] = exon_list gene_points_dict[rname][gname] = points_dict num_exons -= 1; file_read.close() # Define input/output file file_read = open(ref_file_str, 'r') # Keep isoforms per gene and exons per isoform gene_isoforms_dict = dict() gene_isoforms_length_dict = dict() for line in file_read: fields = line.split() rname = fields[2] gname = fields[0] isoform_name = fields[1] num_exons = int(fields[8]) start_pos = [int(x) for x in fields[9].split(',')[:-1]] # The sequence ends in , end_pos = [int(x) for x in fields[10].split(',')[:-1]] # Generate list of isoforms isoform_list = [isoform_name] if (not gene_isoforms_dict.has_key(rname)): 
gene_isoforms_dict[rname] = dict() gene_isoforms_length_dict[rname] = dict() if gene_isoforms_dict[rname].has_key(gname): isoform_list += gene_isoforms_dict[rname][gname] gene_isoforms_dict[rname][gname] = isoform_list gene_isoforms_length_dict[rname][gname + '_' + isoform_name] = compute_isoform_length(start_pos, end_pos) # Generate exon indicator for the isoform exon_list = gene_exons_dict[rname][gname] # Note: Assuming sorted start/end positions region_ind = gene_regions_dict[rname][gname] isoform_exons = [] # Check the exon regions j = 0 for i in range(len(exon_list)): flag = (j < num_exons) while (flag): p0 = exon_list[i][0] p1 = exon_list[i][1] l = exon_list[i][2] if ((p0 >= start_pos[j]) and (p1 <= end_pos[j])): region_name = 'P' + str(gene_points_dict[rname][gname][p0]) + ':' + 'P' + str(gene_points_dict[rname][gname][p1]) if (l >= READ_LEN): # A valid region for exon temp_isoform_name = set() temp_isoform_name.add(isoform_name) #temp_isoform_name = {isoform_name} if (region_ind.has_key(region_name)): temp_isoform_name = temp_isoform_name.union(region_ind[region_name]) else: isoforms_regions_len_dict[rname][gname][region_name] = dict() isoforms_regions_len_dict[rname][gname][region_name][isoform_name] = l - READ_LEN + 1 region_ind[region_name] = temp_isoform_name isoform_exons.append(region_name) flag = False elif (p0 == start_pos[j]): print 'Out-of-order exon position for isoform ' + isoform_name exit(1) elif (p0 < start_pos[j]): flag = False else: j += 1 flag = (j < num_exons) # Check the junction regions for i in range(len(exon_list) - 1): # the last exon can not have any junction p0 = exon_list[i][0] p1 = exon_list[i][1] + 1 # the end_pos is not included l = exon_list[i][2] region_name_temp = 'P' + str(gene_points_dict[rname][gname][p0]) + ':P' + str(gene_points_dict[rname][gname][p1-1]) if (region_name_temp not in isoform_exons): # this exon is not in the isoform continue # compute start and end point of a region that starts with p1 point start = 
max(p1 - READ_LEN + 1, p0) end = p1 - READ_JUNC_MIN_MAP_LEN + min(comp_len_to_junc_gap_forward(i, exon_list, isoform_exons, gene_points_dict[rname][gname]), READ_JUNC_MIN_MAP_LEN - 1) if (end < start): continue # exon is not long enough to be mapped by a read current_point = start while (current_point <= end): region_name = "" if ((current_point == p0) and (p0 != (p1-1))): # Special case when the exon length is 1 we dont want to repeat the point number region_name += ('P' + str(gene_points_dict[rname][gname][p0]) + '-') region_name += ('P' + str(gene_points_dict[rname][gname][p1 - 1]) + '-') remain_len = READ_LEN - (p1 - current_point) j = i while (j < (len(exon_list) -1)): j += 1 p0_temp = exon_list[j][0] p1_temp = exon_list[j][1] + 1 # the end_pos is not included l_temp = exon_list[j][2] region_name_temp = 'P' + str(gene_points_dict[rname][gname][p0_temp]) + ':P' + str(gene_points_dict[rname][gname][p1_temp-1]) if (region_name_temp not in isoform_exons): # this exon is not in the isoform continue if (l_temp >= remain_len): if ((comp_len_to_junc_gap_backward(j, exon_list, isoform_exons, gene_points_dict[rname][gname]) + remain_len ) >= READ_JUNC_MIN_MAP_LEN): region_name += ('P' + str(gene_points_dict[rname][gname][p0_temp])) if ((l_temp == remain_len) and (p0_temp != (p1_temp -1))): # Special case when the exon length is 1 we dont want to repeat the point number region_name += ('-P' + str(gene_points_dict[rname][gname][p1_temp - 1])) temp_isoform_name = set() temp_isoform_name.add(isoform_name) #temp_isoform_name = {isoform_name} if (region_ind.has_key(region_name)): temp_isoform_name = temp_isoform_name.union(region_ind[region_name]) else: isoforms_regions_len_dict[rname][gname][region_name] = dict() region_ind[region_name] = temp_isoform_name if (isoforms_regions_len_dict[rname][gname][region_name].has_key(isoform_name)): isoforms_regions_len_dict[rname][gname][region_name][isoform_name] += 1 else: 
isoforms_regions_len_dict[rname][gname][region_name][isoform_name] = 1 break # Found possible region for this current_point else: remain_len -= l_temp region_name += ('P' + str(gene_points_dict[rname][gname][p0_temp]) + '-') if (p0_temp != (p1_temp -1)): # Special case when the exon length is 1 we dont want to repeat the point number region_name += ('P' + str(gene_points_dict[rname][gname][p1_temp - 1]) + '-') current_point += 1 gene_regions_dict[rname][gname] = region_ind sanity_check_isoform_regions_length(gene_exons_dict, gene_regions_dict, gene_isoforms_dict, isoforms_regions_len_dict, genes_regions_len_dict) file_read.close() return [gene_exons_dict, gene_points_dict, gene_isoforms_dict, genes_regions_len_dict, isoforms_regions_len_dict, gene_regions_dict, gene_isoforms_length_dict] # ######### # ## Generate gene regions output # ######################## def generate_regions_output_file(gene_exons_dict, gene_points_dict, gene_isoforms_dict, genes_regions_len_dict, isoforms_regions_len_dict, gene_regions_dict, gene_isoforms_length_dict, regions_file_str): file_write = open(regions_file_str, 'w') keys1 = sorted(gene_exons_dict.keys()) for i in keys1: keys2 = sorted(gene_exons_dict[i].keys()) for j in keys2: keys3 = sorted(gene_points_dict[i][j].keys()) keys5 = sorted(gene_isoforms_dict[i][j]) file_write.write(j.ljust(max(compute_min_ljust(j), 20))) file_write.write(str(len(gene_isoforms_dict[i][j])).ljust(10) + i.ljust(10) + '\n') for m in keys5: file_write.write(m.ljust(max(compute_min_ljust(m), 20))) file_write.write('\n') for m in keys5: file_write.write(str(gene_isoforms_length_dict[i][j + '_' + m]).ljust(max(compute_min_ljust(m), 20))) file_write.write('\n') for k in keys3: file_write.write(('P' + str(gene_points_dict[i][j][k])).ljust(20)) file_write.write('\n') for k in keys3: file_write.write(str(k).ljust(20)) file_write.write('\n') keys4 = sorted(gene_regions_dict[i][j]) for l in keys4: file_write.write(l.ljust(max(compute_min_ljust(l), 20))) 
file_write.write('\n') for m in keys5: for l in keys4: if m in gene_regions_dict[i][j][l]: if (isoforms_regions_len_dict[i][j][l][m] > 0): file_write.write(str(1).ljust(max(compute_min_ljust(l), 20))) else: # This condition should never be satisfied file_write.write(str(0).ljust(max(compute_min_ljust(l), 20))) else: file_write.write(str(0).ljust(max(compute_min_ljust(l), 20))) file_write.write('\n') for l in keys4: file_write.write(str(genes_regions_len_dict[i][j][l]).ljust(max(compute_min_ljust(l), 20))) file_write.write('\n') file_write.close() # ######### # ## Main # ######### def main(): # Read input parameters ref_file_str = sys.argv[1] READ_LEN = int(sys.argv[2]) READ_JUNC_MIN_MAP_LEN = int(sys.argv[3]) # define exon filename fields = ref_file_str.split('.') exon_file_str = fields[0] + '_exons' for i in range(len(fields) - 1): exon_file_str += '.' + fields[i + 1] print 'Generating exon file.' generate_gene_exons(ref_file_str, exon_file_str) # define isoform filename fields = ref_file_str.split('.') regions_file_str = fields[0] + '_regions' for i in range(len(fields) - 1): regions_file_str += '.' + fields[i + 1] print 'Generating regions file.' start_time = datetime.now() print 'Generating regions file started at ' + str(start_time) [gene_exons_dict, gene_points_dict, gene_isoforms_dict, genes_regions_len_dict, isoforms_regions_len_dict, gene_regions_dict, gene_isoforms_length_dict] = generate_gene_regions(ref_file_str, exon_file_str, READ_LEN, READ_JUNC_MIN_MAP_LEN) generate_regions_output_file(gene_exons_dict, gene_points_dict, gene_isoforms_dict, genes_regions_len_dict, isoforms_regions_len_dict, gene_regions_dict, gene_isoforms_length_dict, regions_file_str) end_time = datetime.now() print 'Generating regions file ended at ' + str(end_time) if __name__ == '__main__': main()
amjadm61/bedrock
refs/heads/master
vendor-local/lib/python/south/tests/__init__.py
93
from __future__ import print_function #import unittest import os import sys from functools import wraps from django.conf import settings from south.hacks import hacks # Make sure skipping tests is available. try: # easiest and best is unittest included in Django>=1.3 from django.utils import unittest except ImportError: # earlier django... use unittest from stdlib import unittest # however, skipUnless was only added in Python 2.7; # if not available, we need to do something else try: skipUnless = unittest.skipUnless #@UnusedVariable except AttributeError: def skipUnless(condition, message): def decorator(testfunc): @wraps(testfunc) def wrapper(self): if condition: # Apply method testfunc(self) else: # The skip exceptions are not available either... print("Skipping", testfunc.__name__,"--", message) return wrapper return decorator # ditto for skipIf try: skipIf = unittest.skipIf #@UnusedVariable except AttributeError: def skipIf(condition, message): def decorator(testfunc): @wraps(testfunc) def wrapper(self): if condition: print("Skipping", testfunc.__name__,"--", message) else: # Apply method testfunc(self) return wrapper return decorator # Add the tests directory so fakeapp is on sys.path test_root = os.path.dirname(__file__) sys.path.append(test_root) # Note: the individual test files are imported below this. class Monkeypatcher(unittest.TestCase): """ Base test class for tests that play with the INSTALLED_APPS setting at runtime. """ def create_fake_app(self, name): class Fake: pass fake = Fake() fake.__name__ = name try: fake.migrations = __import__(name + ".migrations", {}, {}, ['migrations']) except ImportError: pass return fake def setUp(self): """ Changes the Django environment so we can run tests against our test apps. 
""" if hasattr(self, 'installed_apps'): hacks.store_app_cache_state() hacks.set_installed_apps(self.installed_apps) # Make sure dependencies are calculated for new apps Migrations._dependencies_done = False def tearDown(self): """ Undoes what setUp did. """ if hasattr(self, 'installed_apps'): hacks.reset_installed_apps() hacks.restore_app_cache_state() # Try importing all tests if asked for (then we can run 'em) try: skiptest = settings.SKIP_SOUTH_TESTS except: skiptest = True if not skiptest: from south.tests.db import * from south.tests.db_mysql import * from south.tests.db_firebird import * from south.tests.logic import * from south.tests.autodetection import * from south.tests.logger import * from south.tests.inspector import * from south.tests.freezer import *
balint256/gr-baz
refs/heads/master
python/introspective_xmlrpc_server.py
1
#!/usr/bin/env python # -*- coding: utf-8 -*- # # untitled.py # # Copyright 2014 Balint Seeber <balint@crawfish> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. # # from SimpleXMLRPCServer import SimpleXMLRPCServer #from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler import inspect class IntrospectiveXMLRPCServer(SimpleXMLRPCServer): def __init__(self, address, signatures={}, logRequests=True, *kargs, **kwargs): self.signatures = signatures SimpleXMLRPCServer.__init__(self, address, logRequests=logRequests, *kargs, **kwargs) #print "Init" def system_methodSignature(self, method_name): try: arg_list = [] ret_val = '' if method_name in self.signatures: arg_list = self.signatures[method_name] if isinstance(arg_list, tuple): ret_val = arg_list[0] arg_list = arg_list[1] elif method_name in self.funcs: func = self.funcs[method_name] spec = inspect.getargspec(func) arg_list = spec.args elif self.instance is not None: #if len(method_name) > 4 and method_name[:4] == "get_": # base_name #if hasattr(self.instance, "get_"+method_name+"_sig"): # func = getattr(self.instance, "get_"+method_name+"_sig") # arg_list = func() if hasattr(self.instance, method_name): func = getattr(self.instance, method_name) spec = inspect.getargspec(func) arg_list = spec.args[1:] # Ignore 'self' #return str(inspect.signature(func)) # v3.4 return 
[[ret_val] + arg_list] except KeyError, e: err_str = 'method "%s" can not be found' % method_name #raise Exception(err_str) print err_str except Exception, e: print "Exception:", e return [[]] def main(): return 0 if __name__ == '__main__': main()
pattisdr/osf.io
refs/heads/develop
api/tokens/urls.py
3
from django.conf.urls import url from api.tokens import views app_name = 'osf' urlpatterns = [ url(r'^$', views.TokenList.as_view(), name='token-list'), url(r'^(?P<_id>\w+)/$', views.TokenDetail.as_view(), name='token-detail'), ]
anbasile/flask_sample
refs/heads/master
flask/lib/python2.7/site-packages/setuptools/command/upload_docs.py
390
# -*- coding: utf-8 -*- """upload_docs Implements a Distutils 'upload_docs' subcommand (upload documentation to PyPI's pythonhosted.org). """ from base64 import standard_b64encode from distutils import log from distutils.errors import DistutilsOptionError from distutils.command.upload import upload import os import socket import zipfile import tempfile import sys import shutil from setuptools.compat import httplib, urlparse, unicode, iteritems, PY3 from pkg_resources import iter_entry_points errors = 'surrogateescape' if PY3 else 'strict' # This is not just a replacement for byte literals # but works as a general purpose encoder def b(s, encoding='utf-8'): if isinstance(s, unicode): return s.encode(encoding, errors) return s class upload_docs(upload): description = 'Upload documentation to PyPI' user_options = [ ('repository=', 'r', "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY), ('show-response', None, 'display full response text from server'), ('upload-dir=', None, 'directory to upload'), ] boolean_options = upload.boolean_options def has_sphinx(self): if self.upload_dir is None: for ep in iter_entry_points('distutils.commands', 'build_sphinx'): return True sub_commands = [('build_sphinx', has_sphinx)] def initialize_options(self): upload.initialize_options(self) self.upload_dir = None self.target_dir = None def finalize_options(self): upload.finalize_options(self) if self.upload_dir is None: if self.has_sphinx(): build_sphinx = self.get_finalized_command('build_sphinx') self.target_dir = build_sphinx.builder_target_dir else: build = self.get_finalized_command('build') self.target_dir = os.path.join(build.build_base, 'docs') else: self.ensure_dirname('upload_dir') self.target_dir = self.upload_dir self.announce('Using upload directory %s' % self.target_dir) def create_zipfile(self, filename): zip_file = zipfile.ZipFile(filename, "w") try: self.mkpath(self.target_dir) # just in case for root, dirs, files in os.walk(self.target_dir): if root == 
self.target_dir and not files: raise DistutilsOptionError( "no files found in upload directory '%s'" % self.target_dir) for name in files: full = os.path.join(root, name) relative = root[len(self.target_dir):].lstrip(os.path.sep) dest = os.path.join(relative, name) zip_file.write(full, dest) finally: zip_file.close() def run(self): # Run sub commands for cmd_name in self.get_sub_commands(): self.run_command(cmd_name) tmp_dir = tempfile.mkdtemp() name = self.distribution.metadata.get_name() zip_file = os.path.join(tmp_dir, "%s.zip" % name) try: self.create_zipfile(zip_file) self.upload_file(zip_file) finally: shutil.rmtree(tmp_dir) def upload_file(self, filename): f = open(filename, 'rb') content = f.read() f.close() meta = self.distribution.metadata data = { ':action': 'doc_upload', 'name': meta.get_name(), 'content': (os.path.basename(filename), content), } # set up the authentication credentials = b(self.username + ':' + self.password) credentials = standard_b64encode(credentials) if PY3: credentials = credentials.decode('ascii') auth = "Basic " + credentials # Build up the MIME payload for the POST data boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' sep_boundary = b('\n--') + b(boundary) end_boundary = sep_boundary + b('--') body = [] for key, values in iteritems(data): title = '\nContent-Disposition: form-data; name="%s"' % key # handle multiple entries for the same name if not isinstance(values, list): values = [values] for value in values: if type(value) is tuple: title += '; filename="%s"' % value[0] value = value[1] else: value = b(value) body.append(sep_boundary) body.append(b(title)) body.append(b("\n\n")) body.append(value) if value and value[-1:] == b('\r'): body.append(b('\n')) # write an extra newline (lurve Macs) body.append(end_boundary) body.append(b("\n")) body = b('').join(body) self.announce("Submitting documentation to %s" % (self.repository), log.INFO) # build the Request # We can't use urllib2 since we need to send the Basic 
# auth right with the first request schema, netloc, url, params, query, fragments = \ urlparse(self.repository) assert not params and not query and not fragments if schema == 'http': conn = httplib.HTTPConnection(netloc) elif schema == 'https': conn = httplib.HTTPSConnection(netloc) else: raise AssertionError("unsupported schema " + schema) data = '' try: conn.connect() conn.putrequest("POST", url) content_type = 'multipart/form-data; boundary=%s' % boundary conn.putheader('Content-type', content_type) conn.putheader('Content-length', str(len(body))) conn.putheader('Authorization', auth) conn.endheaders() conn.send(body) except socket.error as e: self.announce(str(e), log.ERROR) return r = conn.getresponse() if r.status == 200: self.announce('Server response (%s): %s' % (r.status, r.reason), log.INFO) elif r.status == 301: location = r.getheader('Location') if location is None: location = 'https://pythonhosted.org/%s/' % meta.get_name() self.announce('Upload successful. Visit %s' % location, log.INFO) else: self.announce('Upload failed (%s): %s' % (r.status, r.reason), log.ERROR) if self.show_response: print('-' * 75, r.read(), '-' * 75)
google/orbit
refs/heads/main
third_party/conan/recipes/llvm_textapi/conanfile.py
1
from conans import python_requires import os common = python_requires('llvm-common/0.0.3@orbitdeps/stable') class LLVMObject(common.LLVMModulePackage): version = common.LLVMModulePackage.version name = 'llvm_textapi' llvm_component = 'llvm' llvm_module = 'TextAPI' llvm_requires = ['llvm_headers', 'llvm_binary_format', 'llvm_support' ]
unor/schemaorg
refs/heads/master
lib/isodate/isodatetime.py
12
############################################################################## # Copyright 2009, Gerhard Weis # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the authors nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT ############################################################################## ''' This module defines a method to parse an ISO 8601:2004 date time string. For this job it uses the parse_date and parse_time methods defined in date and time module. 
''' from datetime import datetime from isodate.isostrf import strftime from isodate.isostrf import DATE_EXT_COMPLETE, TIME_EXT_COMPLETE, TZ_EXT from isodate.isodates import parse_date from isodate.isotime import parse_time def parse_datetime(datetimestring): ''' Parses ISO 8601 date-times into datetime.datetime objects. This function uses parse_date and parse_time to do the job, so it allows more combinations of date and time representations, than the actual ISO 8601:2004 standard allows. ''' datestring, timestring = datetimestring.split('T') tmpdate = parse_date(datestring) tmptime = parse_time(timestring) return datetime.combine(tmpdate, tmptime) def datetime_isoformat(tdt, format=DATE_EXT_COMPLETE + 'T' + TIME_EXT_COMPLETE + TZ_EXT): ''' Format datetime strings. This method is just a wrapper around isodate.isostrf.strftime and uses Extended-Complete as default format. ''' return strftime(tdt, format)
pacogomez/pyvcloud
refs/heads/master
pyvcloud/vcd/test.py
1
# VMware vCloud Director Python SDK # Copyright (c) 2017 VMware, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest import warnings import requests import yaml from pyvcloud.vcd.client import BasicLoginCredentials from pyvcloud.vcd.client import Client class TestCase(unittest.TestCase): @classmethod def setUpClass(cls): config_file = 'config.yml' if 'VCD_TEST_CONFIG_FILE' in os.environ: config_file = os.environ['VCD_TEST_CONFIG_FILE'] with open(config_file, 'r') as f: cls.config = yaml.load(f) if not cls.config['vcd']['verify'] and \ cls.config['vcd']['disable_ssl_warnings']: requests.packages.urllib3.disable_warnings() cls.client = Client( cls.config['vcd']['host'], api_version=cls.config['vcd']['api_version'], verify_ssl_certs=cls.config['vcd']['verify'], log_file='pyvcloud.log', log_requests=True, log_headers=True, log_bodies=True) cls.client.set_credentials( BasicLoginCredentials(cls.config['vcd']['user'], cls.config['vcd']['org'], cls.config['vcd']['password'])) @classmethod def tearDownClass(cls): with warnings.catch_warnings(): warnings.simplefilter("ignore", ResourceWarning) cls.client.logout()
susansalkeld/discsongs
refs/heads/master
discsongs/lib/python2.7/site-packages/werkzeug/script.py
147
# -*- coding: utf-8 -*- r''' werkzeug.script ~~~~~~~~~~~~~~~ .. admonition:: Deprecated Functionality ``werkzeug.script`` is deprecated without replacement functionality. Python's command line support improved greatly with :mod:`argparse` and a bunch of alternative modules. Most of the time you have recurring tasks while writing an application such as starting up an interactive python interpreter with some prefilled imports, starting the development server, initializing the database or something similar. For that purpose werkzeug provides the `werkzeug.script` module which helps you writing such scripts. Basic Usage ----------- The following snippet is roughly the same in every werkzeug script:: #!/usr/bin/env python # -*- coding: utf-8 -*- from werkzeug import script # actions go here if __name__ == '__main__': script.run() Starting this script now does nothing because no actions are defined. An action is a function in the same module starting with ``"action_"`` which takes a number of arguments where every argument has a default. The type of the default value specifies the type of the argument. Arguments can then be passed by position or using ``--name=value`` from the shell. Because a runserver and shell command is pretty common there are two factory functions that create such commands:: def make_app(): from yourapplication import YourApplication return YourApplication(...) action_runserver = script.make_runserver(make_app, use_reloader=True) action_shell = script.make_shell(lambda: {'app': make_app()}) Using The Scripts ----------------- The script from above can be used like this from the shell now: .. sourcecode:: text $ ./manage.py --help $ ./manage.py runserver localhost 8080 --debugger --no-reloader $ ./manage.py runserver -p 4000 $ ./manage.py shell As you can see it's possible to pass parameters as positional arguments or as named parameters, pretty much like Python function calls. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. 
:license: BSD, see LICENSE for more details. ''' from __future__ import print_function import sys import inspect import getopt from os.path import basename from werkzeug._compat import iteritems argument_types = { bool: 'boolean', str: 'string', int: 'integer', float: 'float' } converters = { 'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'), 'string': str, 'integer': int, 'float': float } def run(namespace=None, action_prefix='action_', args=None): """Run the script. Participating actions are looked up in the caller's namespace if no namespace is given, otherwise in the dict provided. Only items that start with action_prefix are processed as actions. If you want to use all items in the namespace provided as actions set action_prefix to an empty string. :param namespace: An optional dict where the functions are looked up in. By default the local namespace of the caller is used. :param action_prefix: The prefix for the functions. Everything else is ignored. :param args: the arguments for the function. If not specified :data:`sys.argv` without the first argument is used. 
""" if namespace is None: namespace = sys._getframe(1).f_locals actions = find_actions(namespace, action_prefix) if args is None: args = sys.argv[1:] if not args or args[0] in ('-h', '--help'): return print_usage(actions) elif args[0] not in actions: fail('Unknown action \'%s\'' % args[0]) arguments = {} types = {} key_to_arg = {} long_options = [] formatstring = '' func, doc, arg_def = actions[args.pop(0)] for idx, (arg, shortcut, default, option_type) in enumerate(arg_def): real_arg = arg.replace('-', '_') if shortcut: formatstring += shortcut if not isinstance(default, bool): formatstring += ':' key_to_arg['-' + shortcut] = real_arg long_options.append(isinstance(default, bool) and arg or arg + '=') key_to_arg['--' + arg] = real_arg key_to_arg[idx] = real_arg types[real_arg] = option_type arguments[real_arg] = default try: optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options) except getopt.GetoptError as e: fail(str(e)) specified_arguments = set() for key, value in enumerate(posargs): try: arg = key_to_arg[key] except IndexError: fail('Too many parameters') specified_arguments.add(arg) try: arguments[arg] = converters[types[arg]](value) except ValueError: fail('Invalid value for argument %s (%s): %s' % (key, arg, value)) for key, value in optlist: arg = key_to_arg[key] if arg in specified_arguments: fail('Argument \'%s\' is specified twice' % arg) if types[arg] == 'boolean': if arg.startswith('no_'): value = 'no' else: value = 'yes' try: arguments[arg] = converters[types[arg]](value) except ValueError: fail('Invalid value for \'%s\': %s' % (key, value)) newargs = {} for k, v in iteritems(arguments): newargs[k.startswith('no_') and k[3:] or k] = v arguments = newargs return func(**arguments) def fail(message, code=-1): """Fail with an error.""" print('Error: %s' % message, file=sys.stderr) sys.exit(code) def find_actions(namespace, action_prefix): """Find all the actions in the namespace.""" actions = {} for key, value in iteritems(namespace): if 
key.startswith(action_prefix): actions[key[len(action_prefix):]] = analyse_action(value) return actions def print_usage(actions): """Print the usage information. (Help screen)""" actions = actions.items() actions.sort() print('usage: %s <action> [<options>]' % basename(sys.argv[0])) print(' %s --help' % basename(sys.argv[0])) print() print('actions:') for name, (func, doc, arguments) in actions: print(' %s:' % name) for line in doc.splitlines(): print(' %s' % line) if arguments: print() for arg, shortcut, default, argtype in arguments: if isinstance(default, bool): print(' %s' % ( (shortcut and '-%s, ' % shortcut or '') + '--' + arg )) else: print(' %-30s%-10s%s' % ( (shortcut and '-%s, ' % shortcut or '') + '--' + arg, argtype, default )) print() def analyse_action(func): """Analyse a function.""" description = inspect.getdoc(func) or 'undocumented action' arguments = [] args, varargs, kwargs, defaults = inspect.getargspec(func) if varargs or kwargs: raise TypeError('variable length arguments for action not allowed.') if len(args) != len(defaults or ()): raise TypeError('not all arguments have proper definitions') for idx, (arg, definition) in enumerate(zip(args, defaults or ())): if arg.startswith('_'): raise TypeError('arguments may not start with an underscore') if not isinstance(definition, tuple): shortcut = None default = definition else: shortcut, default = definition argument_type = argument_types[type(default)] if isinstance(default, bool) and default is True: arg = 'no-' + arg arguments.append((arg.replace('_', '-'), shortcut, default, argument_type)) return func, description, arguments def make_shell(init_func=None, banner=None, use_ipython=True): """Returns an action callback that spawns a new interactive python shell. :param init_func: an optional initialization function that is called before the shell is started. The return value of this function is the initial namespace. :param banner: the banner that is displayed before the shell. 
If not specified a generic banner is used instead. :param use_ipython: if set to `True` ipython is used if available. """ if banner is None: banner = 'Interactive Werkzeug Shell' if init_func is None: init_func = dict def action(ipython=use_ipython): """Start a new interactive python session.""" namespace = init_func() if ipython: try: try: from IPython.frontend.terminal.embed import InteractiveShellEmbed sh = InteractiveShellEmbed(banner1=banner) except ImportError: from IPython.Shell import IPShellEmbed sh = IPShellEmbed(banner=banner) except ImportError: pass else: sh(global_ns={}, local_ns=namespace) return from code import interact interact(banner, local=namespace) return action def make_runserver(app_factory, hostname='localhost', port=5000, use_reloader=False, use_debugger=False, use_evalex=True, threaded=False, processes=1, static_files=None, extra_files=None, ssl_context=None): """Returns an action callback that spawns a new development server. .. versionadded:: 0.5 `static_files` and `extra_files` was added. ..versionadded:: 0.6.1 `ssl_context` was added. :param app_factory: a function that returns a new WSGI application. :param hostname: the default hostname the server should listen on. :param port: the default port of the server. :param use_reloader: the default setting for the reloader. :param use_evalex: the default setting for the evalex flag of the debugger. :param threaded: the default threading setting. :param processes: the default number of processes to start. :param static_files: optional dict of static files. :param extra_files: optional list of extra files to track for reloading. :param ssl_context: optional SSL context for running server in HTTPS mode. 
""" def action(hostname=('h', hostname), port=('p', port), reloader=use_reloader, debugger=use_debugger, evalex=use_evalex, threaded=threaded, processes=processes): """Start a new development server.""" from werkzeug.serving import run_simple app = app_factory() run_simple(hostname, port, app, reloader, debugger, evalex, extra_files, 1, threaded, processes, static_files=static_files, ssl_context=ssl_context) return action
juanleal/perfilResolve
refs/heads/master
node_modules/laravel-elixir/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings_test.py
778
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for the MSVSSettings.py file.""" import StringIO import unittest import gyp.MSVSSettings as MSVSSettings class TestSequenceFunctions(unittest.TestCase): def setUp(self): self.stderr = StringIO.StringIO() def _ExpectedWarnings(self, expected): """Compares recorded lines to expected warnings.""" self.stderr.seek(0) actual = self.stderr.read().split('\n') actual = [line for line in actual if line] self.assertEqual(sorted(expected), sorted(actual)) def testValidateMSVSSettings_tool_names(self): """Tests that only MSVS tool names are allowed.""" MSVSSettings.ValidateMSVSSettings( {'VCCLCompilerTool': {}, 'VCLinkerTool': {}, 'VCMIDLTool': {}, 'foo': {}, 'VCResourceCompilerTool': {}, 'VCLibrarianTool': {}, 'VCManifestTool': {}, 'ClCompile': {}}, self.stderr) self._ExpectedWarnings([ 'Warning: unrecognized tool foo', 'Warning: unrecognized tool ClCompile']) def testValidateMSVSSettings_settings(self): """Tests that for invalid MSVS settings.""" MSVSSettings.ValidateMSVSSettings( {'VCCLCompilerTool': { 'AdditionalIncludeDirectories': 'folder1;folder2', 'AdditionalOptions': ['string1', 'string2'], 'AdditionalUsingDirectories': 'folder1;folder2', 'AssemblerListingLocation': 'a_file_name', 'AssemblerOutput': '0', 'BasicRuntimeChecks': '5', 'BrowseInformation': 'fdkslj', 'BrowseInformationFile': 'a_file_name', 'BufferSecurityCheck': 'true', 'CallingConvention': '-1', 'CompileAs': '1', 'DebugInformationFormat': '2', 'DefaultCharIsUnsigned': 'true', 'Detect64BitPortabilityProblems': 'true', 'DisableLanguageExtensions': 'true', 'DisableSpecificWarnings': 'string1;string2', 'EnableEnhancedInstructionSet': '1', 'EnableFiberSafeOptimizations': 'true', 'EnableFunctionLevelLinking': 'true', 'EnableIntrinsicFunctions': 'true', 'EnablePREfast': 'true', 'Enableprefast': 'bogus', 'ErrorReporting': 
'1', 'ExceptionHandling': '1', 'ExpandAttributedSource': 'true', 'FavorSizeOrSpeed': '1', 'FloatingPointExceptions': 'true', 'FloatingPointModel': '1', 'ForceConformanceInForLoopScope': 'true', 'ForcedIncludeFiles': 'file1;file2', 'ForcedUsingFiles': 'file1;file2', 'GeneratePreprocessedFile': '1', 'GenerateXMLDocumentationFiles': 'true', 'IgnoreStandardIncludePath': 'true', 'InlineFunctionExpansion': '1', 'KeepComments': 'true', 'MinimalRebuild': 'true', 'ObjectFile': 'a_file_name', 'OmitDefaultLibName': 'true', 'OmitFramePointers': 'true', 'OpenMP': 'true', 'Optimization': '1', 'PrecompiledHeaderFile': 'a_file_name', 'PrecompiledHeaderThrough': 'a_file_name', 'PreprocessorDefinitions': 'string1;string2', 'ProgramDataBaseFileName': 'a_file_name', 'RuntimeLibrary': '1', 'RuntimeTypeInfo': 'true', 'ShowIncludes': 'true', 'SmallerTypeCheck': 'true', 'StringPooling': 'true', 'StructMemberAlignment': '1', 'SuppressStartupBanner': 'true', 'TreatWChar_tAsBuiltInType': 'true', 'UndefineAllPreprocessorDefinitions': 'true', 'UndefinePreprocessorDefinitions': 'string1;string2', 'UseFullPaths': 'true', 'UsePrecompiledHeader': '1', 'UseUnicodeResponseFiles': 'true', 'WarnAsError': 'true', 'WarningLevel': '1', 'WholeProgramOptimization': 'true', 'XMLDocumentationFileName': 'a_file_name', 'ZZXYZ': 'bogus'}, 'VCLinkerTool': { 'AdditionalDependencies': 'file1;file2', 'AdditionalLibraryDirectories': 'folder1;folder2', 'AdditionalManifestDependencies': 'file1;file2', 'AdditionalOptions': 'a string1', 'AddModuleNamesToAssembly': 'file1;file2', 'AllowIsolation': 'true', 'AssemblyDebug': '2', 'AssemblyLinkResource': 'file1;file2', 'BaseAddress': 'a string1', 'CLRImageType': '2', 'CLRThreadAttribute': '2', 'CLRUnmanagedCodeCheck': 'true', 'DataExecutionPrevention': '2', 'DelayLoadDLLs': 'file1;file2', 'DelaySign': 'true', 'Driver': '2', 'EmbedManagedResourceFile': 'file1;file2', 'EnableCOMDATFolding': '2', 'EnableUAC': 'true', 'EntryPointSymbol': 'a string1', 'ErrorReporting': '2', 
'FixedBaseAddress': '2', 'ForceSymbolReferences': 'file1;file2', 'FunctionOrder': 'a_file_name', 'GenerateDebugInformation': 'true', 'GenerateManifest': 'true', 'GenerateMapFile': 'true', 'HeapCommitSize': 'a string1', 'HeapReserveSize': 'a string1', 'IgnoreAllDefaultLibraries': 'true', 'IgnoreDefaultLibraryNames': 'file1;file2', 'IgnoreEmbeddedIDL': 'true', 'IgnoreImportLibrary': 'true', 'ImportLibrary': 'a_file_name', 'KeyContainer': 'a_file_name', 'KeyFile': 'a_file_name', 'LargeAddressAware': '2', 'LinkIncremental': '2', 'LinkLibraryDependencies': 'true', 'LinkTimeCodeGeneration': '2', 'ManifestFile': 'a_file_name', 'MapExports': 'true', 'MapFileName': 'a_file_name', 'MergedIDLBaseFileName': 'a_file_name', 'MergeSections': 'a string1', 'MidlCommandFile': 'a_file_name', 'ModuleDefinitionFile': 'a_file_name', 'OptimizeForWindows98': '1', 'OptimizeReferences': '2', 'OutputFile': 'a_file_name', 'PerUserRedirection': 'true', 'Profile': 'true', 'ProfileGuidedDatabase': 'a_file_name', 'ProgramDatabaseFile': 'a_file_name', 'RandomizedBaseAddress': '2', 'RegisterOutput': 'true', 'ResourceOnlyDLL': 'true', 'SetChecksum': 'true', 'ShowProgress': '2', 'StackCommitSize': 'a string1', 'StackReserveSize': 'a string1', 'StripPrivateSymbols': 'a_file_name', 'SubSystem': '2', 'SupportUnloadOfDelayLoadedDLL': 'true', 'SuppressStartupBanner': 'true', 'SwapRunFromCD': 'true', 'SwapRunFromNet': 'true', 'TargetMachine': '2', 'TerminalServerAware': '2', 'TurnOffAssemblyGeneration': 'true', 'TypeLibraryFile': 'a_file_name', 'TypeLibraryResourceID': '33', 'UACExecutionLevel': '2', 'UACUIAccess': 'true', 'UseLibraryDependencyInputs': 'true', 'UseUnicodeResponseFiles': 'true', 'Version': 'a string1'}, 'VCMIDLTool': { 'AdditionalIncludeDirectories': 'folder1;folder2', 'AdditionalOptions': 'a string1', 'CPreprocessOptions': 'a string1', 'DefaultCharType': '1', 'DLLDataFileName': 'a_file_name', 'EnableErrorChecks': '1', 'ErrorCheckAllocations': 'true', 'ErrorCheckBounds': 'true', 
'ErrorCheckEnumRange': 'true', 'ErrorCheckRefPointers': 'true', 'ErrorCheckStubData': 'true', 'GenerateStublessProxies': 'true', 'GenerateTypeLibrary': 'true', 'HeaderFileName': 'a_file_name', 'IgnoreStandardIncludePath': 'true', 'InterfaceIdentifierFileName': 'a_file_name', 'MkTypLibCompatible': 'true', 'notgood': 'bogus', 'OutputDirectory': 'a string1', 'PreprocessorDefinitions': 'string1;string2', 'ProxyFileName': 'a_file_name', 'RedirectOutputAndErrors': 'a_file_name', 'StructMemberAlignment': '1', 'SuppressStartupBanner': 'true', 'TargetEnvironment': '1', 'TypeLibraryName': 'a_file_name', 'UndefinePreprocessorDefinitions': 'string1;string2', 'ValidateParameters': 'true', 'WarnAsError': 'true', 'WarningLevel': '1'}, 'VCResourceCompilerTool': { 'AdditionalOptions': 'a string1', 'AdditionalIncludeDirectories': 'folder1;folder2', 'Culture': '1003', 'IgnoreStandardIncludePath': 'true', 'notgood2': 'bogus', 'PreprocessorDefinitions': 'string1;string2', 'ResourceOutputFileName': 'a string1', 'ShowProgress': 'true', 'SuppressStartupBanner': 'true', 'UndefinePreprocessorDefinitions': 'string1;string2'}, 'VCLibrarianTool': { 'AdditionalDependencies': 'file1;file2', 'AdditionalLibraryDirectories': 'folder1;folder2', 'AdditionalOptions': 'a string1', 'ExportNamedFunctions': 'string1;string2', 'ForceSymbolReferences': 'a string1', 'IgnoreAllDefaultLibraries': 'true', 'IgnoreSpecificDefaultLibraries': 'file1;file2', 'LinkLibraryDependencies': 'true', 'ModuleDefinitionFile': 'a_file_name', 'OutputFile': 'a_file_name', 'SuppressStartupBanner': 'true', 'UseUnicodeResponseFiles': 'true'}, 'VCManifestTool': { 'AdditionalManifestFiles': 'file1;file2', 'AdditionalOptions': 'a string1', 'AssemblyIdentity': 'a string1', 'ComponentFileName': 'a_file_name', 'DependencyInformationFile': 'a_file_name', 'GenerateCatalogFiles': 'true', 'InputResourceManifests': 'a string1', 'ManifestResourceFile': 'a_file_name', 'OutputManifestFile': 'a_file_name', 'RegistrarScriptFile': 'a_file_name', 
'ReplacementsFile': 'a_file_name', 'SuppressStartupBanner': 'true', 'TypeLibraryFile': 'a_file_name', 'UpdateFileHashes': 'truel', 'UpdateFileHashesSearchPath': 'a_file_name', 'UseFAT32Workaround': 'true', 'UseUnicodeResponseFiles': 'true', 'VerboseOutput': 'true'}}, self.stderr) self._ExpectedWarnings([ 'Warning: for VCCLCompilerTool/BasicRuntimeChecks, ' 'index value (5) not in expected range [0, 4)', 'Warning: for VCCLCompilerTool/BrowseInformation, ' "invalid literal for int() with base 10: 'fdkslj'", 'Warning: for VCCLCompilerTool/CallingConvention, ' 'index value (-1) not in expected range [0, 3)', 'Warning: for VCCLCompilerTool/DebugInformationFormat, ' 'converted value for 2 not specified.', 'Warning: unrecognized setting VCCLCompilerTool/Enableprefast', 'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ', 'Warning: for VCLinkerTool/TargetMachine, ' 'converted value for 2 not specified.', 'Warning: unrecognized setting VCMIDLTool/notgood', 'Warning: unrecognized setting VCResourceCompilerTool/notgood2', 'Warning: for VCManifestTool/UpdateFileHashes, ' "expected bool; got 'truel'" '']) def testValidateMSBuildSettings_settings(self): """Tests that for invalid MSBuild settings.""" MSVSSettings.ValidateMSBuildSettings( {'ClCompile': { 'AdditionalIncludeDirectories': 'folder1;folder2', 'AdditionalOptions': ['string1', 'string2'], 'AdditionalUsingDirectories': 'folder1;folder2', 'AssemblerListingLocation': 'a_file_name', 'AssemblerOutput': 'NoListing', 'BasicRuntimeChecks': 'StackFrameRuntimeCheck', 'BrowseInformation': 'false', 'BrowseInformationFile': 'a_file_name', 'BufferSecurityCheck': 'true', 'BuildingInIDE': 'true', 'CallingConvention': 'Cdecl', 'CompileAs': 'CompileAsC', 'CompileAsManaged': 'Pure', 'CreateHotpatchableImage': 'true', 'DebugInformationFormat': 'ProgramDatabase', 'DisableLanguageExtensions': 'true', 'DisableSpecificWarnings': 'string1;string2', 'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions', 'EnableFiberSafeOptimizations': 
'true', 'EnablePREfast': 'true', 'Enableprefast': 'bogus', 'ErrorReporting': 'Prompt', 'ExceptionHandling': 'SyncCThrow', 'ExpandAttributedSource': 'true', 'FavorSizeOrSpeed': 'Neither', 'FloatingPointExceptions': 'true', 'FloatingPointModel': 'Precise', 'ForceConformanceInForLoopScope': 'true', 'ForcedIncludeFiles': 'file1;file2', 'ForcedUsingFiles': 'file1;file2', 'FunctionLevelLinking': 'false', 'GenerateXMLDocumentationFiles': 'true', 'IgnoreStandardIncludePath': 'true', 'InlineFunctionExpansion': 'OnlyExplicitInline', 'IntrinsicFunctions': 'false', 'MinimalRebuild': 'true', 'MultiProcessorCompilation': 'true', 'ObjectFileName': 'a_file_name', 'OmitDefaultLibName': 'true', 'OmitFramePointers': 'true', 'OpenMPSupport': 'true', 'Optimization': 'Disabled', 'PrecompiledHeader': 'NotUsing', 'PrecompiledHeaderFile': 'a_file_name', 'PrecompiledHeaderOutputFile': 'a_file_name', 'PreprocessKeepComments': 'true', 'PreprocessorDefinitions': 'string1;string2', 'PreprocessOutputPath': 'a string1', 'PreprocessSuppressLineNumbers': 'false', 'PreprocessToFile': 'false', 'ProcessorNumber': '33', 'ProgramDataBaseFileName': 'a_file_name', 'RuntimeLibrary': 'MultiThreaded', 'RuntimeTypeInfo': 'true', 'ShowIncludes': 'true', 'SmallerTypeCheck': 'true', 'StringPooling': 'true', 'StructMemberAlignment': '1Byte', 'SuppressStartupBanner': 'true', 'TrackerLogDirectory': 'a_folder', 'TreatSpecificWarningsAsErrors': 'string1;string2', 'TreatWarningAsError': 'true', 'TreatWChar_tAsBuiltInType': 'true', 'UndefineAllPreprocessorDefinitions': 'true', 'UndefinePreprocessorDefinitions': 'string1;string2', 'UseFullPaths': 'true', 'UseUnicodeForAssemblerListing': 'true', 'WarningLevel': 'TurnOffAllWarnings', 'WholeProgramOptimization': 'true', 'XMLDocumentationFileName': 'a_file_name', 'ZZXYZ': 'bogus'}, 'Link': { 'AdditionalDependencies': 'file1;file2', 'AdditionalLibraryDirectories': 'folder1;folder2', 'AdditionalManifestDependencies': 'file1;file2', 'AdditionalOptions': 'a string1', 
'AddModuleNamesToAssembly': 'file1;file2', 'AllowIsolation': 'true', 'AssemblyDebug': '', 'AssemblyLinkResource': 'file1;file2', 'BaseAddress': 'a string1', 'BuildingInIDE': 'true', 'CLRImageType': 'ForceIJWImage', 'CLRSupportLastError': 'Enabled', 'CLRThreadAttribute': 'MTAThreadingAttribute', 'CLRUnmanagedCodeCheck': 'true', 'CreateHotPatchableImage': 'X86Image', 'DataExecutionPrevention': 'false', 'DelayLoadDLLs': 'file1;file2', 'DelaySign': 'true', 'Driver': 'NotSet', 'EmbedManagedResourceFile': 'file1;file2', 'EnableCOMDATFolding': 'false', 'EnableUAC': 'true', 'EntryPointSymbol': 'a string1', 'FixedBaseAddress': 'false', 'ForceFileOutput': 'Enabled', 'ForceSymbolReferences': 'file1;file2', 'FunctionOrder': 'a_file_name', 'GenerateDebugInformation': 'true', 'GenerateMapFile': 'true', 'HeapCommitSize': 'a string1', 'HeapReserveSize': 'a string1', 'IgnoreAllDefaultLibraries': 'true', 'IgnoreEmbeddedIDL': 'true', 'IgnoreSpecificDefaultLibraries': 'a_file_list', 'ImageHasSafeExceptionHandlers': 'true', 'ImportLibrary': 'a_file_name', 'KeyContainer': 'a_file_name', 'KeyFile': 'a_file_name', 'LargeAddressAware': 'false', 'LinkDLL': 'true', 'LinkErrorReporting': 'SendErrorReport', 'LinkStatus': 'true', 'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration', 'ManifestFile': 'a_file_name', 'MapExports': 'true', 'MapFileName': 'a_file_name', 'MergedIDLBaseFileName': 'a_file_name', 'MergeSections': 'a string1', 'MidlCommandFile': 'a_file_name', 'MinimumRequiredVersion': 'a string1', 'ModuleDefinitionFile': 'a_file_name', 'MSDOSStubFileName': 'a_file_name', 'NoEntryPoint': 'true', 'OptimizeReferences': 'false', 'OutputFile': 'a_file_name', 'PerUserRedirection': 'true', 'PreventDllBinding': 'true', 'Profile': 'true', 'ProfileGuidedDatabase': 'a_file_name', 'ProgramDatabaseFile': 'a_file_name', 'RandomizedBaseAddress': 'false', 'RegisterOutput': 'true', 'SectionAlignment': '33', 'SetChecksum': 'true', 'ShowProgress': 'LinkVerboseREF', 'SpecifySectionAttributes': 'a string1', 
'StackCommitSize': 'a string1', 'StackReserveSize': 'a string1', 'StripPrivateSymbols': 'a_file_name', 'SubSystem': 'Console', 'SupportNobindOfDelayLoadedDLL': 'true', 'SupportUnloadOfDelayLoadedDLL': 'true', 'SuppressStartupBanner': 'true', 'SwapRunFromCD': 'true', 'SwapRunFromNET': 'true', 'TargetMachine': 'MachineX86', 'TerminalServerAware': 'false', 'TrackerLogDirectory': 'a_folder', 'TreatLinkerWarningAsErrors': 'true', 'TurnOffAssemblyGeneration': 'true', 'TypeLibraryFile': 'a_file_name', 'TypeLibraryResourceID': '33', 'UACExecutionLevel': 'AsInvoker', 'UACUIAccess': 'true', 'Version': 'a string1'}, 'ResourceCompile': { 'AdditionalIncludeDirectories': 'folder1;folder2', 'AdditionalOptions': 'a string1', 'Culture': '0x236', 'IgnoreStandardIncludePath': 'true', 'NullTerminateStrings': 'true', 'PreprocessorDefinitions': 'string1;string2', 'ResourceOutputFileName': 'a string1', 'ShowProgress': 'true', 'SuppressStartupBanner': 'true', 'TrackerLogDirectory': 'a_folder', 'UndefinePreprocessorDefinitions': 'string1;string2'}, 'Midl': { 'AdditionalIncludeDirectories': 'folder1;folder2', 'AdditionalOptions': 'a string1', 'ApplicationConfigurationMode': 'true', 'ClientStubFile': 'a_file_name', 'CPreprocessOptions': 'a string1', 'DefaultCharType': 'Signed', 'DllDataFileName': 'a_file_name', 'EnableErrorChecks': 'EnableCustom', 'ErrorCheckAllocations': 'true', 'ErrorCheckBounds': 'true', 'ErrorCheckEnumRange': 'true', 'ErrorCheckRefPointers': 'true', 'ErrorCheckStubData': 'true', 'GenerateClientFiles': 'Stub', 'GenerateServerFiles': 'None', 'GenerateStublessProxies': 'true', 'GenerateTypeLibrary': 'true', 'HeaderFileName': 'a_file_name', 'IgnoreStandardIncludePath': 'true', 'InterfaceIdentifierFileName': 'a_file_name', 'LocaleID': '33', 'MkTypLibCompatible': 'true', 'OutputDirectory': 'a string1', 'PreprocessorDefinitions': 'string1;string2', 'ProxyFileName': 'a_file_name', 'RedirectOutputAndErrors': 'a_file_name', 'ServerStubFile': 'a_file_name', 'StructMemberAlignment': 
'NotSet', 'SuppressCompilerWarnings': 'true', 'SuppressStartupBanner': 'true', 'TargetEnvironment': 'Itanium', 'TrackerLogDirectory': 'a_folder', 'TypeLibFormat': 'NewFormat', 'TypeLibraryName': 'a_file_name', 'UndefinePreprocessorDefinitions': 'string1;string2', 'ValidateAllParameters': 'true', 'WarnAsError': 'true', 'WarningLevel': '1'}, 'Lib': { 'AdditionalDependencies': 'file1;file2', 'AdditionalLibraryDirectories': 'folder1;folder2', 'AdditionalOptions': 'a string1', 'DisplayLibrary': 'a string1', 'ErrorReporting': 'PromptImmediately', 'ExportNamedFunctions': 'string1;string2', 'ForceSymbolReferences': 'a string1', 'IgnoreAllDefaultLibraries': 'true', 'IgnoreSpecificDefaultLibraries': 'file1;file2', 'LinkTimeCodeGeneration': 'true', 'MinimumRequiredVersion': 'a string1', 'ModuleDefinitionFile': 'a_file_name', 'Name': 'a_file_name', 'OutputFile': 'a_file_name', 'RemoveObjects': 'file1;file2', 'SubSystem': 'Console', 'SuppressStartupBanner': 'true', 'TargetMachine': 'MachineX86i', 'TrackerLogDirectory': 'a_folder', 'TreatLibWarningAsErrors': 'true', 'UseUnicodeResponseFiles': 'true', 'Verbose': 'true'}, 'Manifest': { 'AdditionalManifestFiles': 'file1;file2', 'AdditionalOptions': 'a string1', 'AssemblyIdentity': 'a string1', 'ComponentFileName': 'a_file_name', 'EnableDPIAwareness': 'fal', 'GenerateCatalogFiles': 'truel', 'GenerateCategoryTags': 'true', 'InputResourceManifests': 'a string1', 'ManifestFromManagedAssembly': 'a_file_name', 'notgood3': 'bogus', 'OutputManifestFile': 'a_file_name', 'OutputResourceManifests': 'a string1', 'RegistrarScriptFile': 'a_file_name', 'ReplacementsFile': 'a_file_name', 'SuppressDependencyElement': 'true', 'SuppressStartupBanner': 'true', 'TrackerLogDirectory': 'a_folder', 'TypeLibraryFile': 'a_file_name', 'UpdateFileHashes': 'true', 'UpdateFileHashesSearchPath': 'a_file_name', 'VerboseOutput': 'true'}, 'ProjectReference': { 'LinkLibraryDependencies': 'true', 'UseLibraryDependencyInputs': 'true'}, 'ManifestResourceCompile': { 
'ResourceOutputFileName': 'a_file_name'}, '': { 'EmbedManifest': 'true', 'GenerateManifest': 'true', 'IgnoreImportLibrary': 'true', 'LinkIncremental': 'false'}}, self.stderr) self._ExpectedWarnings([ 'Warning: unrecognized setting ClCompile/Enableprefast', 'Warning: unrecognized setting ClCompile/ZZXYZ', 'Warning: unrecognized setting Manifest/notgood3', 'Warning: for Manifest/GenerateCatalogFiles, ' "expected bool; got 'truel'", 'Warning: for Lib/TargetMachine, unrecognized enumerated value ' 'MachineX86i', "Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"]) def testConvertToMSBuildSettings_empty(self): """Tests an empty conversion.""" msvs_settings = {} expected_msbuild_settings = {} actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings( msvs_settings, self.stderr) self.assertEqual(expected_msbuild_settings, actual_msbuild_settings) self._ExpectedWarnings([]) def testConvertToMSBuildSettings_minimal(self): """Tests a minimal conversion.""" msvs_settings = { 'VCCLCompilerTool': { 'AdditionalIncludeDirectories': 'dir1', 'AdditionalOptions': '/foo', 'BasicRuntimeChecks': '0', }, 'VCLinkerTool': { 'LinkTimeCodeGeneration': '1', 'ErrorReporting': '1', 'DataExecutionPrevention': '2', }, } expected_msbuild_settings = { 'ClCompile': { 'AdditionalIncludeDirectories': 'dir1', 'AdditionalOptions': '/foo', 'BasicRuntimeChecks': 'Default', }, 'Link': { 'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration', 'LinkErrorReporting': 'PromptImmediately', 'DataExecutionPrevention': 'true', }, } actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings( msvs_settings, self.stderr) self.assertEqual(expected_msbuild_settings, actual_msbuild_settings) self._ExpectedWarnings([]) def testConvertToMSBuildSettings_warnings(self): """Tests conversion that generates warnings.""" msvs_settings = { 'VCCLCompilerTool': { 'AdditionalIncludeDirectories': '1', 'AdditionalOptions': '2', # These are incorrect values: 'BasicRuntimeChecks': '12', 'BrowseInformation': 
'21', 'UsePrecompiledHeader': '13', 'GeneratePreprocessedFile': '14'}, 'VCLinkerTool': { # These are incorrect values: 'Driver': '10', 'LinkTimeCodeGeneration': '31', 'ErrorReporting': '21', 'FixedBaseAddress': '6'}, 'VCResourceCompilerTool': { # Custom 'Culture': '1003'}} expected_msbuild_settings = { 'ClCompile': { 'AdditionalIncludeDirectories': '1', 'AdditionalOptions': '2'}, 'Link': {}, 'ResourceCompile': { # Custom 'Culture': '0x03eb'}} actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings( msvs_settings, self.stderr) self.assertEqual(expected_msbuild_settings, actual_msbuild_settings) self._ExpectedWarnings([ 'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to ' 'MSBuild, index value (12) not in expected range [0, 4)', 'Warning: while converting VCCLCompilerTool/BrowseInformation to ' 'MSBuild, index value (21) not in expected range [0, 3)', 'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to ' 'MSBuild, index value (13) not in expected range [0, 3)', 'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to ' 'MSBuild, value must be one of [0, 1, 2]; got 14', 'Warning: while converting VCLinkerTool/Driver to ' 'MSBuild, index value (10) not in expected range [0, 4)', 'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to ' 'MSBuild, index value (31) not in expected range [0, 5)', 'Warning: while converting VCLinkerTool/ErrorReporting to ' 'MSBuild, index value (21) not in expected range [0, 3)', 'Warning: while converting VCLinkerTool/FixedBaseAddress to ' 'MSBuild, index value (6) not in expected range [0, 3)', ]) def testConvertToMSBuildSettings_full_synthetic(self): """Tests conversion of all the MSBuild settings.""" msvs_settings = { 'VCCLCompilerTool': { 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', 'AdditionalOptions': 'a_string', 'AdditionalUsingDirectories': 'folder1;folder2;folder3', 'AssemblerListingLocation': 'a_file_name', 'AssemblerOutput': '0', 'BasicRuntimeChecks': 
'1', 'BrowseInformation': '2', 'BrowseInformationFile': 'a_file_name', 'BufferSecurityCheck': 'true', 'CallingConvention': '0', 'CompileAs': '1', 'DebugInformationFormat': '4', 'DefaultCharIsUnsigned': 'true', 'Detect64BitPortabilityProblems': 'true', 'DisableLanguageExtensions': 'true', 'DisableSpecificWarnings': 'd1;d2;d3', 'EnableEnhancedInstructionSet': '0', 'EnableFiberSafeOptimizations': 'true', 'EnableFunctionLevelLinking': 'true', 'EnableIntrinsicFunctions': 'true', 'EnablePREfast': 'true', 'ErrorReporting': '1', 'ExceptionHandling': '2', 'ExpandAttributedSource': 'true', 'FavorSizeOrSpeed': '0', 'FloatingPointExceptions': 'true', 'FloatingPointModel': '1', 'ForceConformanceInForLoopScope': 'true', 'ForcedIncludeFiles': 'file1;file2;file3', 'ForcedUsingFiles': 'file1;file2;file3', 'GeneratePreprocessedFile': '1', 'GenerateXMLDocumentationFiles': 'true', 'IgnoreStandardIncludePath': 'true', 'InlineFunctionExpansion': '2', 'KeepComments': 'true', 'MinimalRebuild': 'true', 'ObjectFile': 'a_file_name', 'OmitDefaultLibName': 'true', 'OmitFramePointers': 'true', 'OpenMP': 'true', 'Optimization': '3', 'PrecompiledHeaderFile': 'a_file_name', 'PrecompiledHeaderThrough': 'a_file_name', 'PreprocessorDefinitions': 'd1;d2;d3', 'ProgramDataBaseFileName': 'a_file_name', 'RuntimeLibrary': '0', 'RuntimeTypeInfo': 'true', 'ShowIncludes': 'true', 'SmallerTypeCheck': 'true', 'StringPooling': 'true', 'StructMemberAlignment': '1', 'SuppressStartupBanner': 'true', 'TreatWChar_tAsBuiltInType': 'true', 'UndefineAllPreprocessorDefinitions': 'true', 'UndefinePreprocessorDefinitions': 'd1;d2;d3', 'UseFullPaths': 'true', 'UsePrecompiledHeader': '1', 'UseUnicodeResponseFiles': 'true', 'WarnAsError': 'true', 'WarningLevel': '2', 'WholeProgramOptimization': 'true', 'XMLDocumentationFileName': 'a_file_name'}, 'VCLinkerTool': { 'AdditionalDependencies': 'file1;file2;file3', 'AdditionalLibraryDirectories': 'folder1;folder2;folder3', 'AdditionalLibraryDirectories_excluded': 
'folder1;folder2;folder3', 'AdditionalManifestDependencies': 'file1;file2;file3', 'AdditionalOptions': 'a_string', 'AddModuleNamesToAssembly': 'file1;file2;file3', 'AllowIsolation': 'true', 'AssemblyDebug': '0', 'AssemblyLinkResource': 'file1;file2;file3', 'BaseAddress': 'a_string', 'CLRImageType': '1', 'CLRThreadAttribute': '2', 'CLRUnmanagedCodeCheck': 'true', 'DataExecutionPrevention': '0', 'DelayLoadDLLs': 'file1;file2;file3', 'DelaySign': 'true', 'Driver': '1', 'EmbedManagedResourceFile': 'file1;file2;file3', 'EnableCOMDATFolding': '0', 'EnableUAC': 'true', 'EntryPointSymbol': 'a_string', 'ErrorReporting': '0', 'FixedBaseAddress': '1', 'ForceSymbolReferences': 'file1;file2;file3', 'FunctionOrder': 'a_file_name', 'GenerateDebugInformation': 'true', 'GenerateManifest': 'true', 'GenerateMapFile': 'true', 'HeapCommitSize': 'a_string', 'HeapReserveSize': 'a_string', 'IgnoreAllDefaultLibraries': 'true', 'IgnoreDefaultLibraryNames': 'file1;file2;file3', 'IgnoreEmbeddedIDL': 'true', 'IgnoreImportLibrary': 'true', 'ImportLibrary': 'a_file_name', 'KeyContainer': 'a_file_name', 'KeyFile': 'a_file_name', 'LargeAddressAware': '2', 'LinkIncremental': '1', 'LinkLibraryDependencies': 'true', 'LinkTimeCodeGeneration': '2', 'ManifestFile': 'a_file_name', 'MapExports': 'true', 'MapFileName': 'a_file_name', 'MergedIDLBaseFileName': 'a_file_name', 'MergeSections': 'a_string', 'MidlCommandFile': 'a_file_name', 'ModuleDefinitionFile': 'a_file_name', 'OptimizeForWindows98': '1', 'OptimizeReferences': '0', 'OutputFile': 'a_file_name', 'PerUserRedirection': 'true', 'Profile': 'true', 'ProfileGuidedDatabase': 'a_file_name', 'ProgramDatabaseFile': 'a_file_name', 'RandomizedBaseAddress': '1', 'RegisterOutput': 'true', 'ResourceOnlyDLL': 'true', 'SetChecksum': 'true', 'ShowProgress': '0', 'StackCommitSize': 'a_string', 'StackReserveSize': 'a_string', 'StripPrivateSymbols': 'a_file_name', 'SubSystem': '2', 'SupportUnloadOfDelayLoadedDLL': 'true', 'SuppressStartupBanner': 'true', 
'SwapRunFromCD': 'true', 'SwapRunFromNet': 'true', 'TargetMachine': '3', 'TerminalServerAware': '2', 'TurnOffAssemblyGeneration': 'true', 'TypeLibraryFile': 'a_file_name', 'TypeLibraryResourceID': '33', 'UACExecutionLevel': '1', 'UACUIAccess': 'true', 'UseLibraryDependencyInputs': 'false', 'UseUnicodeResponseFiles': 'true', 'Version': 'a_string'}, 'VCResourceCompilerTool': { 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', 'AdditionalOptions': 'a_string', 'Culture': '1003', 'IgnoreStandardIncludePath': 'true', 'PreprocessorDefinitions': 'd1;d2;d3', 'ResourceOutputFileName': 'a_string', 'ShowProgress': 'true', 'SuppressStartupBanner': 'true', 'UndefinePreprocessorDefinitions': 'd1;d2;d3'}, 'VCMIDLTool': { 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', 'AdditionalOptions': 'a_string', 'CPreprocessOptions': 'a_string', 'DefaultCharType': '0', 'DLLDataFileName': 'a_file_name', 'EnableErrorChecks': '2', 'ErrorCheckAllocations': 'true', 'ErrorCheckBounds': 'true', 'ErrorCheckEnumRange': 'true', 'ErrorCheckRefPointers': 'true', 'ErrorCheckStubData': 'true', 'GenerateStublessProxies': 'true', 'GenerateTypeLibrary': 'true', 'HeaderFileName': 'a_file_name', 'IgnoreStandardIncludePath': 'true', 'InterfaceIdentifierFileName': 'a_file_name', 'MkTypLibCompatible': 'true', 'OutputDirectory': 'a_string', 'PreprocessorDefinitions': 'd1;d2;d3', 'ProxyFileName': 'a_file_name', 'RedirectOutputAndErrors': 'a_file_name', 'StructMemberAlignment': '3', 'SuppressStartupBanner': 'true', 'TargetEnvironment': '1', 'TypeLibraryName': 'a_file_name', 'UndefinePreprocessorDefinitions': 'd1;d2;d3', 'ValidateParameters': 'true', 'WarnAsError': 'true', 'WarningLevel': '4'}, 'VCLibrarianTool': { 'AdditionalDependencies': 'file1;file2;file3', 'AdditionalLibraryDirectories': 'folder1;folder2;folder3', 'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3', 'AdditionalOptions': 'a_string', 'ExportNamedFunctions': 'd1;d2;d3', 'ForceSymbolReferences': 'a_string', 
'IgnoreAllDefaultLibraries': 'true', 'IgnoreSpecificDefaultLibraries': 'file1;file2;file3', 'LinkLibraryDependencies': 'true', 'ModuleDefinitionFile': 'a_file_name', 'OutputFile': 'a_file_name', 'SuppressStartupBanner': 'true', 'UseUnicodeResponseFiles': 'true'}, 'VCManifestTool': { 'AdditionalManifestFiles': 'file1;file2;file3', 'AdditionalOptions': 'a_string', 'AssemblyIdentity': 'a_string', 'ComponentFileName': 'a_file_name', 'DependencyInformationFile': 'a_file_name', 'EmbedManifest': 'true', 'GenerateCatalogFiles': 'true', 'InputResourceManifests': 'a_string', 'ManifestResourceFile': 'my_name', 'OutputManifestFile': 'a_file_name', 'RegistrarScriptFile': 'a_file_name', 'ReplacementsFile': 'a_file_name', 'SuppressStartupBanner': 'true', 'TypeLibraryFile': 'a_file_name', 'UpdateFileHashes': 'true', 'UpdateFileHashesSearchPath': 'a_file_name', 'UseFAT32Workaround': 'true', 'UseUnicodeResponseFiles': 'true', 'VerboseOutput': 'true'}} expected_msbuild_settings = { 'ClCompile': { 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', 'AdditionalOptions': 'a_string /J', 'AdditionalUsingDirectories': 'folder1;folder2;folder3', 'AssemblerListingLocation': 'a_file_name', 'AssemblerOutput': 'NoListing', 'BasicRuntimeChecks': 'StackFrameRuntimeCheck', 'BrowseInformation': 'true', 'BrowseInformationFile': 'a_file_name', 'BufferSecurityCheck': 'true', 'CallingConvention': 'Cdecl', 'CompileAs': 'CompileAsC', 'DebugInformationFormat': 'EditAndContinue', 'DisableLanguageExtensions': 'true', 'DisableSpecificWarnings': 'd1;d2;d3', 'EnableEnhancedInstructionSet': 'NotSet', 'EnableFiberSafeOptimizations': 'true', 'EnablePREfast': 'true', 'ErrorReporting': 'Prompt', 'ExceptionHandling': 'Async', 'ExpandAttributedSource': 'true', 'FavorSizeOrSpeed': 'Neither', 'FloatingPointExceptions': 'true', 'FloatingPointModel': 'Strict', 'ForceConformanceInForLoopScope': 'true', 'ForcedIncludeFiles': 'file1;file2;file3', 'ForcedUsingFiles': 'file1;file2;file3', 'FunctionLevelLinking': 
'true', 'GenerateXMLDocumentationFiles': 'true', 'IgnoreStandardIncludePath': 'true', 'InlineFunctionExpansion': 'AnySuitable', 'IntrinsicFunctions': 'true', 'MinimalRebuild': 'true', 'ObjectFileName': 'a_file_name', 'OmitDefaultLibName': 'true', 'OmitFramePointers': 'true', 'OpenMPSupport': 'true', 'Optimization': 'Full', 'PrecompiledHeader': 'Create', 'PrecompiledHeaderFile': 'a_file_name', 'PrecompiledHeaderOutputFile': 'a_file_name', 'PreprocessKeepComments': 'true', 'PreprocessorDefinitions': 'd1;d2;d3', 'PreprocessSuppressLineNumbers': 'false', 'PreprocessToFile': 'true', 'ProgramDataBaseFileName': 'a_file_name', 'RuntimeLibrary': 'MultiThreaded', 'RuntimeTypeInfo': 'true', 'ShowIncludes': 'true', 'SmallerTypeCheck': 'true', 'StringPooling': 'true', 'StructMemberAlignment': '1Byte', 'SuppressStartupBanner': 'true', 'TreatWarningAsError': 'true', 'TreatWChar_tAsBuiltInType': 'true', 'UndefineAllPreprocessorDefinitions': 'true', 'UndefinePreprocessorDefinitions': 'd1;d2;d3', 'UseFullPaths': 'true', 'WarningLevel': 'Level2', 'WholeProgramOptimization': 'true', 'XMLDocumentationFileName': 'a_file_name'}, 'Link': { 'AdditionalDependencies': 'file1;file2;file3', 'AdditionalLibraryDirectories': 'folder1;folder2;folder3', 'AdditionalManifestDependencies': 'file1;file2;file3', 'AdditionalOptions': 'a_string', 'AddModuleNamesToAssembly': 'file1;file2;file3', 'AllowIsolation': 'true', 'AssemblyDebug': '', 'AssemblyLinkResource': 'file1;file2;file3', 'BaseAddress': 'a_string', 'CLRImageType': 'ForceIJWImage', 'CLRThreadAttribute': 'STAThreadingAttribute', 'CLRUnmanagedCodeCheck': 'true', 'DataExecutionPrevention': '', 'DelayLoadDLLs': 'file1;file2;file3', 'DelaySign': 'true', 'Driver': 'Driver', 'EmbedManagedResourceFile': 'file1;file2;file3', 'EnableCOMDATFolding': '', 'EnableUAC': 'true', 'EntryPointSymbol': 'a_string', 'FixedBaseAddress': 'false', 'ForceSymbolReferences': 'file1;file2;file3', 'FunctionOrder': 'a_file_name', 'GenerateDebugInformation': 'true', 
'GenerateMapFile': 'true', 'HeapCommitSize': 'a_string', 'HeapReserveSize': 'a_string', 'IgnoreAllDefaultLibraries': 'true', 'IgnoreEmbeddedIDL': 'true', 'IgnoreSpecificDefaultLibraries': 'file1;file2;file3', 'ImportLibrary': 'a_file_name', 'KeyContainer': 'a_file_name', 'KeyFile': 'a_file_name', 'LargeAddressAware': 'true', 'LinkErrorReporting': 'NoErrorReport', 'LinkTimeCodeGeneration': 'PGInstrument', 'ManifestFile': 'a_file_name', 'MapExports': 'true', 'MapFileName': 'a_file_name', 'MergedIDLBaseFileName': 'a_file_name', 'MergeSections': 'a_string', 'MidlCommandFile': 'a_file_name', 'ModuleDefinitionFile': 'a_file_name', 'NoEntryPoint': 'true', 'OptimizeReferences': '', 'OutputFile': 'a_file_name', 'PerUserRedirection': 'true', 'Profile': 'true', 'ProfileGuidedDatabase': 'a_file_name', 'ProgramDatabaseFile': 'a_file_name', 'RandomizedBaseAddress': 'false', 'RegisterOutput': 'true', 'SetChecksum': 'true', 'ShowProgress': 'NotSet', 'StackCommitSize': 'a_string', 'StackReserveSize': 'a_string', 'StripPrivateSymbols': 'a_file_name', 'SubSystem': 'Windows', 'SupportUnloadOfDelayLoadedDLL': 'true', 'SuppressStartupBanner': 'true', 'SwapRunFromCD': 'true', 'SwapRunFromNET': 'true', 'TargetMachine': 'MachineARM', 'TerminalServerAware': 'true', 'TurnOffAssemblyGeneration': 'true', 'TypeLibraryFile': 'a_file_name', 'TypeLibraryResourceID': '33', 'UACExecutionLevel': 'HighestAvailable', 'UACUIAccess': 'true', 'Version': 'a_string'}, 'ResourceCompile': { 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', 'AdditionalOptions': 'a_string', 'Culture': '0x03eb', 'IgnoreStandardIncludePath': 'true', 'PreprocessorDefinitions': 'd1;d2;d3', 'ResourceOutputFileName': 'a_string', 'ShowProgress': 'true', 'SuppressStartupBanner': 'true', 'UndefinePreprocessorDefinitions': 'd1;d2;d3'}, 'Midl': { 'AdditionalIncludeDirectories': 'folder1;folder2;folder3', 'AdditionalOptions': 'a_string', 'CPreprocessOptions': 'a_string', 'DefaultCharType': 'Unsigned', 'DllDataFileName': 
'a_file_name', 'EnableErrorChecks': 'All', 'ErrorCheckAllocations': 'true', 'ErrorCheckBounds': 'true', 'ErrorCheckEnumRange': 'true', 'ErrorCheckRefPointers': 'true', 'ErrorCheckStubData': 'true', 'GenerateStublessProxies': 'true', 'GenerateTypeLibrary': 'true', 'HeaderFileName': 'a_file_name', 'IgnoreStandardIncludePath': 'true', 'InterfaceIdentifierFileName': 'a_file_name', 'MkTypLibCompatible': 'true', 'OutputDirectory': 'a_string', 'PreprocessorDefinitions': 'd1;d2;d3', 'ProxyFileName': 'a_file_name', 'RedirectOutputAndErrors': 'a_file_name', 'StructMemberAlignment': '4', 'SuppressStartupBanner': 'true', 'TargetEnvironment': 'Win32', 'TypeLibraryName': 'a_file_name', 'UndefinePreprocessorDefinitions': 'd1;d2;d3', 'ValidateAllParameters': 'true', 'WarnAsError': 'true', 'WarningLevel': '4'}, 'Lib': { 'AdditionalDependencies': 'file1;file2;file3', 'AdditionalLibraryDirectories': 'folder1;folder2;folder3', 'AdditionalOptions': 'a_string', 'ExportNamedFunctions': 'd1;d2;d3', 'ForceSymbolReferences': 'a_string', 'IgnoreAllDefaultLibraries': 'true', 'IgnoreSpecificDefaultLibraries': 'file1;file2;file3', 'ModuleDefinitionFile': 'a_file_name', 'OutputFile': 'a_file_name', 'SuppressStartupBanner': 'true', 'UseUnicodeResponseFiles': 'true'}, 'Manifest': { 'AdditionalManifestFiles': 'file1;file2;file3', 'AdditionalOptions': 'a_string', 'AssemblyIdentity': 'a_string', 'ComponentFileName': 'a_file_name', 'GenerateCatalogFiles': 'true', 'InputResourceManifests': 'a_string', 'OutputManifestFile': 'a_file_name', 'RegistrarScriptFile': 'a_file_name', 'ReplacementsFile': 'a_file_name', 'SuppressStartupBanner': 'true', 'TypeLibraryFile': 'a_file_name', 'UpdateFileHashes': 'true', 'UpdateFileHashesSearchPath': 'a_file_name', 'VerboseOutput': 'true'}, 'ManifestResourceCompile': { 'ResourceOutputFileName': 'my_name'}, 'ProjectReference': { 'LinkLibraryDependencies': 'true', 'UseLibraryDependencyInputs': 'false'}, '': { 'EmbedManifest': 'true', 'GenerateManifest': 'true', 
'IgnoreImportLibrary': 'true', 'LinkIncremental': 'false'}} actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings( msvs_settings, self.stderr) self.assertEqual(expected_msbuild_settings, actual_msbuild_settings) self._ExpectedWarnings([]) def testConvertToMSBuildSettings_actual(self): """Tests the conversion of an actual project. A VS2008 project with most of the options defined was created through the VS2008 IDE. It was then converted to VS2010. The tool settings found in the .vcproj and .vcxproj files were converted to the two dictionaries msvs_settings and expected_msbuild_settings. Note that for many settings, the VS2010 converter adds macros like %(AdditionalIncludeDirectories) to make sure than inherited values are included. Since the Gyp projects we generate do not use inheritance, we removed these macros. They were: ClCompile: AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)' AdditionalOptions: ' %(AdditionalOptions)' AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)' DisableSpecificWarnings: ';%(DisableSpecificWarnings)', ForcedIncludeFiles: ';%(ForcedIncludeFiles)', ForcedUsingFiles: ';%(ForcedUsingFiles)', PreprocessorDefinitions: ';%(PreprocessorDefinitions)', UndefinePreprocessorDefinitions: ';%(UndefinePreprocessorDefinitions)', Link: AdditionalDependencies: ';%(AdditionalDependencies)', AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)', AdditionalManifestDependencies: ';%(AdditionalManifestDependencies)', AdditionalOptions: ' %(AdditionalOptions)', AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)', AssemblyLinkResource: ';%(AssemblyLinkResource)', DelayLoadDLLs: ';%(DelayLoadDLLs)', EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)', ForceSymbolReferences: ';%(ForceSymbolReferences)', IgnoreSpecificDefaultLibraries: ';%(IgnoreSpecificDefaultLibraries)', ResourceCompile: AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)', AdditionalOptions: ' %(AdditionalOptions)', 
PreprocessorDefinitions: ';%(PreprocessorDefinitions)', Manifest: AdditionalManifestFiles: ';%(AdditionalManifestFiles)', AdditionalOptions: ' %(AdditionalOptions)', InputResourceManifests: ';%(InputResourceManifests)', """ msvs_settings = { 'VCCLCompilerTool': { 'AdditionalIncludeDirectories': 'dir1', 'AdditionalOptions': '/more', 'AdditionalUsingDirectories': 'test', 'AssemblerListingLocation': '$(IntDir)\\a', 'AssemblerOutput': '1', 'BasicRuntimeChecks': '3', 'BrowseInformation': '1', 'BrowseInformationFile': '$(IntDir)\\e', 'BufferSecurityCheck': 'false', 'CallingConvention': '1', 'CompileAs': '1', 'DebugInformationFormat': '4', 'DefaultCharIsUnsigned': 'true', 'Detect64BitPortabilityProblems': 'true', 'DisableLanguageExtensions': 'true', 'DisableSpecificWarnings': 'abc', 'EnableEnhancedInstructionSet': '1', 'EnableFiberSafeOptimizations': 'true', 'EnableFunctionLevelLinking': 'true', 'EnableIntrinsicFunctions': 'true', 'EnablePREfast': 'true', 'ErrorReporting': '2', 'ExceptionHandling': '2', 'ExpandAttributedSource': 'true', 'FavorSizeOrSpeed': '2', 'FloatingPointExceptions': 'true', 'FloatingPointModel': '1', 'ForceConformanceInForLoopScope': 'false', 'ForcedIncludeFiles': 'def', 'ForcedUsingFiles': 'ge', 'GeneratePreprocessedFile': '2', 'GenerateXMLDocumentationFiles': 'true', 'IgnoreStandardIncludePath': 'true', 'InlineFunctionExpansion': '1', 'KeepComments': 'true', 'MinimalRebuild': 'true', 'ObjectFile': '$(IntDir)\\b', 'OmitDefaultLibName': 'true', 'OmitFramePointers': 'true', 'OpenMP': 'true', 'Optimization': '3', 'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche', 'PrecompiledHeaderThrough': 'StdAfx.hd', 'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE', 'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb', 'RuntimeLibrary': '3', 'RuntimeTypeInfo': 'false', 'ShowIncludes': 'true', 'SmallerTypeCheck': 'true', 'StringPooling': 'true', 'StructMemberAlignment': '3', 'SuppressStartupBanner': 'false', 'TreatWChar_tAsBuiltInType': 'false', 
'UndefineAllPreprocessorDefinitions': 'true', 'UndefinePreprocessorDefinitions': 'wer', 'UseFullPaths': 'true', 'UsePrecompiledHeader': '0', 'UseUnicodeResponseFiles': 'false', 'WarnAsError': 'true', 'WarningLevel': '3', 'WholeProgramOptimization': 'true', 'XMLDocumentationFileName': '$(IntDir)\\c'}, 'VCLinkerTool': { 'AdditionalDependencies': 'zx', 'AdditionalLibraryDirectories': 'asd', 'AdditionalManifestDependencies': 's2', 'AdditionalOptions': '/mor2', 'AddModuleNamesToAssembly': 'd1', 'AllowIsolation': 'false', 'AssemblyDebug': '1', 'AssemblyLinkResource': 'd5', 'BaseAddress': '23423', 'CLRImageType': '3', 'CLRThreadAttribute': '1', 'CLRUnmanagedCodeCheck': 'true', 'DataExecutionPrevention': '0', 'DelayLoadDLLs': 'd4', 'DelaySign': 'true', 'Driver': '2', 'EmbedManagedResourceFile': 'd2', 'EnableCOMDATFolding': '1', 'EnableUAC': 'false', 'EntryPointSymbol': 'f5', 'ErrorReporting': '2', 'FixedBaseAddress': '1', 'ForceSymbolReferences': 'd3', 'FunctionOrder': 'fssdfsd', 'GenerateDebugInformation': 'true', 'GenerateManifest': 'false', 'GenerateMapFile': 'true', 'HeapCommitSize': '13', 'HeapReserveSize': '12', 'IgnoreAllDefaultLibraries': 'true', 'IgnoreDefaultLibraryNames': 'flob;flok', 'IgnoreEmbeddedIDL': 'true', 'IgnoreImportLibrary': 'true', 'ImportLibrary': 'f4', 'KeyContainer': 'f7', 'KeyFile': 'f6', 'LargeAddressAware': '2', 'LinkIncremental': '0', 'LinkLibraryDependencies': 'false', 'LinkTimeCodeGeneration': '1', 'ManifestFile': '$(IntDir)\\$(TargetFileName).2intermediate.manifest', 'MapExports': 'true', 'MapFileName': 'd5', 'MergedIDLBaseFileName': 'f2', 'MergeSections': 'f5', 'MidlCommandFile': 'f1', 'ModuleDefinitionFile': 'sdsd', 'OptimizeForWindows98': '2', 'OptimizeReferences': '2', 'OutputFile': '$(OutDir)\\$(ProjectName)2.exe', 'PerUserRedirection': 'true', 'Profile': 'true', 'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd', 'ProgramDatabaseFile': 'Flob.pdb', 'RandomizedBaseAddress': '1', 'RegisterOutput': 'true', 'ResourceOnlyDLL': 
'true', 'SetChecksum': 'false', 'ShowProgress': '1', 'StackCommitSize': '15', 'StackReserveSize': '14', 'StripPrivateSymbols': 'd3', 'SubSystem': '1', 'SupportUnloadOfDelayLoadedDLL': 'true', 'SuppressStartupBanner': 'false', 'SwapRunFromCD': 'true', 'SwapRunFromNet': 'true', 'TargetMachine': '1', 'TerminalServerAware': '1', 'TurnOffAssemblyGeneration': 'true', 'TypeLibraryFile': 'f3', 'TypeLibraryResourceID': '12', 'UACExecutionLevel': '2', 'UACUIAccess': 'true', 'UseLibraryDependencyInputs': 'true', 'UseUnicodeResponseFiles': 'false', 'Version': '333'}, 'VCResourceCompilerTool': { 'AdditionalIncludeDirectories': 'f3', 'AdditionalOptions': '/more3', 'Culture': '3084', 'IgnoreStandardIncludePath': 'true', 'PreprocessorDefinitions': '_UNICODE;UNICODE2', 'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res', 'ShowProgress': 'true'}, 'VCManifestTool': { 'AdditionalManifestFiles': 'sfsdfsd', 'AdditionalOptions': 'afdsdafsd', 'AssemblyIdentity': 'sddfdsadfsa', 'ComponentFileName': 'fsdfds', 'DependencyInformationFile': '$(IntDir)\\mt.depdfd', 'EmbedManifest': 'false', 'GenerateCatalogFiles': 'true', 'InputResourceManifests': 'asfsfdafs', 'ManifestResourceFile': '$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf', 'OutputManifestFile': '$(TargetPath).manifestdfs', 'RegistrarScriptFile': 'sdfsfd', 'ReplacementsFile': 'sdffsd', 'SuppressStartupBanner': 'false', 'TypeLibraryFile': 'sfsd', 'UpdateFileHashes': 'true', 'UpdateFileHashesSearchPath': 'sfsd', 'UseFAT32Workaround': 'true', 'UseUnicodeResponseFiles': 'false', 'VerboseOutput': 'true'}} expected_msbuild_settings = { 'ClCompile': { 'AdditionalIncludeDirectories': 'dir1', 'AdditionalOptions': '/more /J', 'AdditionalUsingDirectories': 'test', 'AssemblerListingLocation': '$(IntDir)a', 'AssemblerOutput': 'AssemblyCode', 'BasicRuntimeChecks': 'EnableFastChecks', 'BrowseInformation': 'true', 'BrowseInformationFile': '$(IntDir)e', 'BufferSecurityCheck': 'false', 'CallingConvention': 'FastCall', 'CompileAs': 'CompileAsC', 
'DebugInformationFormat': 'EditAndContinue', 'DisableLanguageExtensions': 'true', 'DisableSpecificWarnings': 'abc', 'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions', 'EnableFiberSafeOptimizations': 'true', 'EnablePREfast': 'true', 'ErrorReporting': 'Queue', 'ExceptionHandling': 'Async', 'ExpandAttributedSource': 'true', 'FavorSizeOrSpeed': 'Size', 'FloatingPointExceptions': 'true', 'FloatingPointModel': 'Strict', 'ForceConformanceInForLoopScope': 'false', 'ForcedIncludeFiles': 'def', 'ForcedUsingFiles': 'ge', 'FunctionLevelLinking': 'true', 'GenerateXMLDocumentationFiles': 'true', 'IgnoreStandardIncludePath': 'true', 'InlineFunctionExpansion': 'OnlyExplicitInline', 'IntrinsicFunctions': 'true', 'MinimalRebuild': 'true', 'ObjectFileName': '$(IntDir)b', 'OmitDefaultLibName': 'true', 'OmitFramePointers': 'true', 'OpenMPSupport': 'true', 'Optimization': 'Full', 'PrecompiledHeader': 'NotUsing', # Actual conversion gives '' 'PrecompiledHeaderFile': 'StdAfx.hd', 'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche', 'PreprocessKeepComments': 'true', 'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE', 'PreprocessSuppressLineNumbers': 'true', 'PreprocessToFile': 'true', 'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb', 'RuntimeLibrary': 'MultiThreadedDebugDLL', 'RuntimeTypeInfo': 'false', 'ShowIncludes': 'true', 'SmallerTypeCheck': 'true', 'StringPooling': 'true', 'StructMemberAlignment': '4Bytes', 'SuppressStartupBanner': 'false', 'TreatWarningAsError': 'true', 'TreatWChar_tAsBuiltInType': 'false', 'UndefineAllPreprocessorDefinitions': 'true', 'UndefinePreprocessorDefinitions': 'wer', 'UseFullPaths': 'true', 'WarningLevel': 'Level3', 'WholeProgramOptimization': 'true', 'XMLDocumentationFileName': '$(IntDir)c'}, 'Link': { 'AdditionalDependencies': 'zx', 'AdditionalLibraryDirectories': 'asd', 'AdditionalManifestDependencies': 's2', 'AdditionalOptions': '/mor2', 'AddModuleNamesToAssembly': 'd1', 'AllowIsolation': 'false', 'AssemblyDebug': 'true', 
'AssemblyLinkResource': 'd5', 'BaseAddress': '23423', 'CLRImageType': 'ForceSafeILImage', 'CLRThreadAttribute': 'MTAThreadingAttribute', 'CLRUnmanagedCodeCheck': 'true', 'DataExecutionPrevention': '', 'DelayLoadDLLs': 'd4', 'DelaySign': 'true', 'Driver': 'UpOnly', 'EmbedManagedResourceFile': 'd2', 'EnableCOMDATFolding': 'false', 'EnableUAC': 'false', 'EntryPointSymbol': 'f5', 'FixedBaseAddress': 'false', 'ForceSymbolReferences': 'd3', 'FunctionOrder': 'fssdfsd', 'GenerateDebugInformation': 'true', 'GenerateMapFile': 'true', 'HeapCommitSize': '13', 'HeapReserveSize': '12', 'IgnoreAllDefaultLibraries': 'true', 'IgnoreEmbeddedIDL': 'true', 'IgnoreSpecificDefaultLibraries': 'flob;flok', 'ImportLibrary': 'f4', 'KeyContainer': 'f7', 'KeyFile': 'f6', 'LargeAddressAware': 'true', 'LinkErrorReporting': 'QueueForNextLogin', 'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration', 'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest', 'MapExports': 'true', 'MapFileName': 'd5', 'MergedIDLBaseFileName': 'f2', 'MergeSections': 'f5', 'MidlCommandFile': 'f1', 'ModuleDefinitionFile': 'sdsd', 'NoEntryPoint': 'true', 'OptimizeReferences': 'true', 'OutputFile': '$(OutDir)$(ProjectName)2.exe', 'PerUserRedirection': 'true', 'Profile': 'true', 'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd', 'ProgramDatabaseFile': 'Flob.pdb', 'RandomizedBaseAddress': 'false', 'RegisterOutput': 'true', 'SetChecksum': 'false', 'ShowProgress': 'LinkVerbose', 'StackCommitSize': '15', 'StackReserveSize': '14', 'StripPrivateSymbols': 'd3', 'SubSystem': 'Console', 'SupportUnloadOfDelayLoadedDLL': 'true', 'SuppressStartupBanner': 'false', 'SwapRunFromCD': 'true', 'SwapRunFromNET': 'true', 'TargetMachine': 'MachineX86', 'TerminalServerAware': 'false', 'TurnOffAssemblyGeneration': 'true', 'TypeLibraryFile': 'f3', 'TypeLibraryResourceID': '12', 'UACExecutionLevel': 'RequireAdministrator', 'UACUIAccess': 'true', 'Version': '333'}, 'ResourceCompile': { 'AdditionalIncludeDirectories': 'f3', 
'AdditionalOptions': '/more3', 'Culture': '0x0c0c', 'IgnoreStandardIncludePath': 'true', 'PreprocessorDefinitions': '_UNICODE;UNICODE2', 'ResourceOutputFileName': '$(IntDir)%(Filename)3.res', 'ShowProgress': 'true'}, 'Manifest': { 'AdditionalManifestFiles': 'sfsdfsd', 'AdditionalOptions': 'afdsdafsd', 'AssemblyIdentity': 'sddfdsadfsa', 'ComponentFileName': 'fsdfds', 'GenerateCatalogFiles': 'true', 'InputResourceManifests': 'asfsfdafs', 'OutputManifestFile': '$(TargetPath).manifestdfs', 'RegistrarScriptFile': 'sdfsfd', 'ReplacementsFile': 'sdffsd', 'SuppressStartupBanner': 'false', 'TypeLibraryFile': 'sfsd', 'UpdateFileHashes': 'true', 'UpdateFileHashesSearchPath': 'sfsd', 'VerboseOutput': 'true'}, 'ProjectReference': { 'LinkLibraryDependencies': 'false', 'UseLibraryDependencyInputs': 'true'}, '': { 'EmbedManifest': 'false', 'GenerateManifest': 'false', 'IgnoreImportLibrary': 'true', 'LinkIncremental': '' }, 'ManifestResourceCompile': { 'ResourceOutputFileName': '$(IntDir)$(TargetFileName).embed.manifest.resfdsf'} } actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings( msvs_settings, self.stderr) self.assertEqual(expected_msbuild_settings, actual_msbuild_settings) self._ExpectedWarnings([]) if __name__ == '__main__': unittest.main()
jerbob92/CouchPotatoServer
refs/heads/master
couchpotato/core/providers/automation/rottentomatoes/main.py
10
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
from xml.etree.ElementTree import QName
import datetime
import re

log = CPLog(__name__)


class Rottentomatoes(Automation, RSS):
    """Automation provider that watches Rotten Tomatoes RSS feeds and
    enqueues movies whose tomatometer rating meets the configured minimum.
    """

    # Seconds between automation runs.
    interval = 1800

    def getIMDBids(self):
        """Scan all enabled feed URLs and return a list of IMDB ids for
        movies that are "fresh" enough and pass the minimal-movie check.
        """
        movies = []

        rotten_tomatoes_namespace = 'http://www.rottentomatoes.com/xmlns/rtmovie/'

        # Map each configured feed URL to its enabled flag (0/1); the two
        # settings are parallel comma-separated lists.
        urls = dict(zip(
            splitString(self.conf('automation_urls')),
            [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
        ))

        # Qualified tag name is feed-independent; compute it once instead of
        # once per URL.
        rating_tag = str(QName(rotten_tomatoes_namespace, 'tomatometer_percent'))

        for url, enabled in urls.items():
            if not enabled:
                continue

            rss_movies = self.getRSSData(url)

            for movie in rss_movies:
                value = self.getTextElement(movie, "title")
                # Feed titles look like "97% The Movie Name": take everything
                # after the "%<space>".  Raw string fixes the invalid '\s'
                # escape in the original pattern (SyntaxWarning on Python 3.12+).
                result = re.search(r'(?<=%\s).*', value)

                if result:
                    log.info2('Something smells...')
                    rating = tryInt(self.getTextElement(movie, rating_tag))
                    name = result.group(0)

                    if rating < tryInt(self.conf('tomatometer_percent')):
                        log.info2('%s seems to be rotten...', name)
                    else:
                        log.info2('Found %s fresh enough movies, enqueuing: %s', (rating, name))
                        year = datetime.datetime.now().strftime("%Y")
                        imdb = self.search(name, year)
                        if imdb and self.isMinimalMovie(imdb):
                            movies.append(imdb['imdb'])

        return movies
zhimin711/nova
refs/heads/master
nova/conf/__init__.py
2
# Copyright 2015 OpenStack Foundation
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

# This package got introduced during the Mitaka cycle in 2015 to
# have a central place where the config options of Nova can be maintained.
# For more background see the blueprint "centralize-config-options"
#
# The commented-out imports/registrations below are placeholders for option
# groups that have not yet been migrated into nova.conf; they are kept so the
# alphabetical checklist stays visible.  Do not delete them.

from oslo_config import cfg

# from nova.conf import api
# from nova.conf import api_database
from nova.conf import availability_zone
# from nova.conf import aws
from nova.conf import barbican
from nova.conf import base
from nova.conf import cells
from nova.conf import cert
# from nova.conf import cinder
from nova.conf import cloudpipe
from nova.conf import compute
from nova.conf import conductor
from nova.conf import configdrive
from nova.conf import console
from nova.conf import consoleauth
# from nova.conf import cors
# from nova.conf import cors.subdomain
from nova.conf import crypto
# from nova.conf import database
# from nova.conf import disk
from nova.conf import ephemeral_storage
from nova.conf import floating_ips
from nova.conf import glance
from nova.conf import guestfs
# from nova.conf import host
from nova.conf import hyperv
# from nova.conf import image
# from nova.conf import imagecache
from nova.conf import image_file_url
from nova.conf import ipv6
from nova.conf import ironic
from nova.conf import keymgr
# from nova.conf import keystone_authtoken
# from nova.conf import libvirt
from nova.conf import mks
# from nova.conf import matchmaker_redis
# from nova.conf import metadata
# from nova.conf import metrics
from nova.conf import network
from nova.conf import neutron
from nova.conf import notifications
from nova.conf import novnc
# from nova.conf import osapi_v21
from nova.conf import pci
from nova.conf import quota
from nova.conf import rdp
from nova.conf import remote_debug
from nova.conf import rpc
from nova.conf import scheduler
# from nova.conf import security
from nova.conf import serial_console
from nova.conf import service
# from nova.conf import spice
# from nova.conf import ssl
# from nova.conf import trusted_computing
from nova.conf import upgrade_levels
from nova.conf import virt
from nova.conf import vmware
from nova.conf import vnc
# from nova.conf import volume
from nova.conf import workarounds
from nova.conf import wsgi
from nova.conf import xenserver
from nova.conf import xvp
# from nova.conf import zookeeper

# The single global config object every Nova module shares.
CONF = cfg.CONF

# Register each migrated option group on the global CONF.  Order is not
# significant (each module registers a distinct group), but the list is kept
# roughly alphabetical to mirror the imports above.
# api.register_opts(CONF)
# api_database.register_opts(CONF)
availability_zone.register_opts(CONF)
# aws.register_opts(CONF)
barbican.register_opts(CONF)
base.register_opts(CONF)
cells.register_opts(CONF)
cert.register_opts(CONF)
# cinder.register_opts(CONF)
cloudpipe.register_opts(CONF)
compute.register_opts(CONF)
conductor.register_opts(CONF)
configdrive.register_opts(CONF)
console.register_opts(CONF)
consoleauth.register_opts(CONF)
# cors.register_opts(CONF)
# cors.subdomain.register_opts(CONF)
crypto.register_opts(CONF)
# database.register_opts(CONF)
# disk.register_opts(CONF)
ephemeral_storage.register_opts(CONF)
floating_ips.register_opts(CONF)
glance.register_opts(CONF)
guestfs.register_opts(CONF)
# host.register_opts(CONF)
hyperv.register_opts(CONF)
mks.register_opts(CONF)
# image.register_opts(CONF)
# imagecache.register_opts(CONF)
image_file_url.register_opts(CONF)
ipv6.register_opts(CONF)
ironic.register_opts(CONF)
keymgr.register_opts(CONF)
# keystone_authtoken.register_opts(CONF)
# libvirt.register_opts(CONF)
# matchmaker_redis.register_opts(CONF)
# metadata.register_opts(CONF)
# metrics.register_opts(CONF)
network.register_opts(CONF)
neutron.register_opts(CONF)
notifications.register_opts(CONF)
novnc.register_opts(CONF)
# osapi_v21.register_opts(CONF)
pci.register_opts(CONF)
quota.register_opts(CONF)
rdp.register_opts(CONF)
rpc.register_opts(CONF)
scheduler.register_opts(CONF)
# security.register_opts(CONF)
serial_console.register_opts(CONF)
service.register_opts(CONF)
# spice.register_opts(CONF)
# ssl.register_opts(CONF)
# trusted_computing.register_opts(CONF)
upgrade_levels.register_opts(CONF)
virt.register_opts(CONF)
vmware.register_opts(CONF)
vnc.register_opts(CONF)
# volume.register_opts(CONF)
workarounds.register_opts(CONF)
wsgi.register_opts(CONF)
xenserver.register_opts(CONF)
xvp.register_opts(CONF)
# zookeeper.register_opts(CONF)

# remote_debug exposes CLI options (not regular config-file opts), so it uses
# register_cli_opts and is kept separate from the block above.
remote_debug.register_cli_opts(CONF)
eharney/nova
refs/heads/master
nova/api/openstack/compute/contrib/evacuate.py
8
#   Copyright 2013 OpenStack Foundation
#
#   Licensed under the Apache License, Version 2.0 (the "License"); you may
#   not use this file except in compliance with the License. You may obtain
#   a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#   License for the specific language governing permissions and limitations
#   under the License.

from webob import exc

from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import strutils
from nova import utils

# Policy check for the os-evacuate extension; raises if the caller is not
# allowed to evacuate servers.
authorize = extensions.extension_authorizer('compute', 'evacuate')


class Controller(wsgi.Controller):
    """Server-action controller implementing the ``evacuate`` action."""

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()
        self.host_api = compute.HostAPI()

    @wsgi.action('evacuate')
    def _evacuate(self, req, id, body):
        """Permit admins to evacuate a server from a failed host
        to a new one.

        Request body shape::

            {"evacuate": {"host": ..., "onSharedStorage": ...,
                          "adminPass": ...  # optional
                         }}

        Returns ``{'adminPass': ...}`` when a password applies (the instance
        is rebuilt on non-shared storage), otherwise returns nothing.

        Raises HTTPBadRequest for malformed input or an admin password
        supplied together with shared storage; HTTPNotFound for an unknown
        target host or instance; HTTPConflict when the instance state does
        not allow evacuation.
        """
        context = req.environ["nova.context"]
        authorize(context)

        if not self.is_valid_body(body, "evacuate"):
            raise exc.HTTPBadRequest(_("Malformed request body"))
        evacuate_body = body["evacuate"]

        try:
            host = evacuate_body["host"]
            on_shared_storage = strutils.bool_from_string(
                evacuate_body["onSharedStorage"])
        except (TypeError, KeyError):
            # TypeError covers a non-dict body; KeyError a missing field.
            msg = _("host and onSharedStorage must be specified.")
            raise exc.HTTPBadRequest(explanation=msg)

        password = None
        if 'adminPass' in evacuate_body:
            # check that if requested to evacuate server on shared storage
            # password not specified
            if on_shared_storage:
                msg = _("admin password can't be changed on existing disk")
                raise exc.HTTPBadRequest(explanation=msg)

            password = evacuate_body['adminPass']
        elif not on_shared_storage:
            # Rebuilding on local storage creates a fresh disk, so a new
            # admin password is generated and returned to the caller.
            password = utils.generate_password()

        try:
            # Validate that the target host actually runs a compute service
            # before attempting the evacuation.
            self.host_api.service_get_by_compute_host(context, host)
        except exception.NotFound:
            msg = _("Compute host %s not found.") % host
            raise exc.HTTPNotFound(explanation=msg)

        try:
            instance = self.compute_api.get(context, id)
            self.compute_api.evacuate(context, instance, host,
                                      on_shared_storage, password)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'evacuate')
        except exception.InstanceNotFound as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except exception.ComputeServiceInUse as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())

        # Only surface the password when one applies (non-shared storage);
        # falls through with an empty response otherwise.
        if password:
            return {'adminPass': password}


class Evacuate(extensions.ExtensionDescriptor):
    """Enables server evacuation."""

    name = "Evacuate"
    alias = "os-evacuate"
    namespace = "http://docs.openstack.org/compute/ext/evacuate/api/v2"
    updated = "2013-01-06T00:00:00+00:00"

    def get_controller_extensions(self):
        # Attach the evacuate action to the existing 'servers' resource.
        controller = Controller()
        extension = extensions.ControllerExtension(self, 'servers', controller)
        return [extension]
xrmx/django-stored-messages
refs/heads/master
stored_messages/backends/redis/__init__.py
2
# flake8: noqa from .backend import RedisBackend, Message
cyril51/Sick-Beard
refs/heads/development
lib/hachoir_parser/file_system/linux_swap.py
90
""" Linux swap file. Documentation: Linux kernel source code, files: - mm/swapfile.c - include/linux/swap.h Author: Victor Stinner Creation date: 25 december 2006 (christmas ;-)) """ from lib.hachoir_parser import Parser from lib.hachoir_core.field import (ParserError, GenericVector, UInt32, String, Bytes, NullBytes, RawBytes) from lib.hachoir_core.endian import LITTLE_ENDIAN from lib.hachoir_core.tools import humanFilesize from lib.hachoir_core.bits import str2hex PAGE_SIZE = 4096 # Definition of MAX_SWAP_BADPAGES in Linux kernel: # (__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int) MAX_SWAP_BADPAGES = ((PAGE_SIZE - 10) - 1536) // 4 class Page(RawBytes): static_size = PAGE_SIZE*8 def __init__(self, parent, name): RawBytes.__init__(self, parent, name, PAGE_SIZE) class UUID(Bytes): static_size = 16*8 def __init__(self, parent, name): Bytes.__init__(self, parent, name, 16) def createDisplay(self): text = str2hex(self.value, format=r"%02x") return "%s-%s-%s-%s-%s" % ( text[:8], text[8:12], text[12:16], text[16:20], text[20:]) class LinuxSwapFile(Parser): PARSER_TAGS = { "id": "linux_swap", "file_ext": ("",), "category": "file_system", "min_size": PAGE_SIZE*8, "description": "Linux swap file", "magic": ( ("SWAP-SPACE", (PAGE_SIZE-10)*8), ("SWAPSPACE2", (PAGE_SIZE-10)*8), ("S1SUSPEND\0", (PAGE_SIZE-10)*8), ), } endian = LITTLE_ENDIAN def validate(self): magic = self.stream.readBytes((PAGE_SIZE-10)*8, 10) if magic not in ("SWAP-SPACE", "SWAPSPACE2", "S1SUSPEND\0"): return "Unknown magic string" if MAX_SWAP_BADPAGES < self["nb_badpage"].value: return "Invalid number of bad page (%u)" % self["nb_badpage"].value return True def getPageCount(self): """ Number of pages which can really be used for swapping: number of page minus bad pages minus one page (used for the header) """ # -1 because first page is used for the header return self["last_page"].value - self["nb_badpage"].value - 1 def createDescription(self): if self["magic"].value == "S1SUSPEND\0": 
text = "Suspend swap file version 1" elif self["magic"].value == "SWAPSPACE2": text = "Linux swap file version 2" else: text = "Linux swap file version 1" nb_page = self.getPageCount() return "%s, page size: %s, %s pages" % ( text, humanFilesize(PAGE_SIZE), nb_page) def createFields(self): # First kilobyte: boot sectors yield RawBytes(self, "boot", 1024, "Space for disklabel etc.") # Header yield UInt32(self, "version") yield UInt32(self, "last_page") yield UInt32(self, "nb_badpage") yield UUID(self, "sws_uuid") yield UUID(self, "sws_volume") yield NullBytes(self, "reserved", 117*4) # Read bad pages (if any) count = self["nb_badpage"].value if count: if MAX_SWAP_BADPAGES < count: raise ParserError("Invalid number of bad page (%u)" % count) yield GenericVector(self, "badpages", count, UInt32, "badpage") # Read magic padding = self.seekByte(PAGE_SIZE - 10, "padding", null=True) if padding: yield padding yield String(self, "magic", 10, charset="ASCII") # Read all pages yield GenericVector(self, "pages", self["last_page"].value, Page, "page") # Padding at the end padding = self.seekBit(self.size, "end_padding", null=True) if padding: yield padding
Eddy0402/Environment
refs/heads/master
vim/ycmd/cpp/ycm/tests/gmock/gtest/test/gtest_xml_output_unittest.py
1815
#!/usr/bin/env python # # Copyright 2006, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Unit test for the gtest_xml_output module""" __author__ = 'eefacm@gmail.com (Sean Mcafee)' import datetime import errno import os import re import sys from xml.dom import minidom, Node import gtest_test_utils import gtest_xml_test_utils GTEST_FILTER_FLAG = '--gtest_filter' GTEST_LIST_TESTS_FLAG = '--gtest_list_tests' GTEST_OUTPUT_FLAG = "--gtest_output" GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml" GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_" SUPPORTS_STACK_TRACES = False if SUPPORTS_STACK_TRACES: STACK_TRACE_TEMPLATE = '\nStack trace:\n*' else: STACK_TRACE_TEMPLATE = '' EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42"> <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/> </testsuite> <testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*"> <testcase name="Fails" status="run" time="*" classname="FailedTest"> <failure message="gtest_xml_output_unittest_.cc:*&#x0A;Value of: 2&#x0A;Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Value of: 2 Expected: 1%(stack)s]]></failure> </testcase> </testsuite> <testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*"> <testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/> <testcase name="Fails" status="run" time="*" classname="MixedResultTest"> <failure message="gtest_xml_output_unittest_.cc:*&#x0A;Value of: 2&#x0A;Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Value of: 2 Expected: 1%(stack)s]]></failure> <failure message="gtest_xml_output_unittest_.cc:*&#x0A;Value of: 3&#x0A;Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Value of: 3 Expected: 2%(stack)s]]></failure> </testcase> <testcase name="DISABLED_test" status="notrun" time="*" 
classname="MixedResultTest"/> </testsuite> <testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*"> <testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest"> <failure message="gtest_xml_output_unittest_.cc:*&#x0A;Failed&#x0A;XML output: &lt;?xml encoding=&quot;utf-8&quot;&gt;&lt;top&gt;&lt;![CDATA[cdata text]]&gt;&lt;/top&gt;" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Failed XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]&gt;<![CDATA[</top>%(stack)s]]></failure> </testcase> </testsuite> <testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*"> <testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest"> <failure message="gtest_xml_output_unittest_.cc:*&#x0A;Failed&#x0A;Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:* Failed Invalid characters in brackets []%(stack)s]]></failure> </testcase> </testsuite> <testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*"> <testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/> </testsuite> <testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye"> <testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/> <testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/> <testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/> <testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/> </testsuite> <testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*"> <testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/> <testcase 
name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/> <testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/> </testsuite> <testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*"> <testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" /> <testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" /> <testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" /> <testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" /> </testsuite> <testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" /> </testsuite> <testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" /> </testsuite> <testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" /> </testsuite> <testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" /> </testsuite> </testsuites>""" % {'stack': STACK_TRACE_TEMPLATE} EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="1" failures="0" disabled="0" errors="0" time="*" 
timestamp="*" name="AllTests" ad_hoc_property="42"> <testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*"> <testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/> </testsuite> </testsuites>""" EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?> <testsuites tests="0" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests"> </testsuites>""" GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME) SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess( [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase): """ Unit test for Google Test's XML output functionality. """ # This test currently breaks on platforms that do not support typed and # type-parameterized tests, so we don't run it under them. if SUPPORTS_TYPED_TESTS: def testNonEmptyXmlOutput(self): """ Runs a test program that generates a non-empty XML output, and tests that the XML output is expected. """ self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1) def testEmptyXmlOutput(self): """Verifies XML output for a Google Test binary without actual tests. Runs a test program that generates an empty XML output, and tests that the XML output is expected. """ self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0) def testTimestampValue(self): """Checks whether the timestamp attribute in the XML output is valid. Runs a test program that generates an empty XML output, and checks if the timestamp attribute in the testsuites tag is valid. """ actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0) date_time_str = actual.documentElement.getAttributeNode('timestamp').value # datetime.strptime() is only available in Python 2.5+ so we have to # parse the expected datetime manually. 
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str) self.assertTrue( re.match, 'XML datettime string %s has incorrect format' % date_time_str) date_time_from_xml = datetime.datetime( year=int(match.group(1)), month=int(match.group(2)), day=int(match.group(3)), hour=int(match.group(4)), minute=int(match.group(5)), second=int(match.group(6))) time_delta = abs(datetime.datetime.now() - date_time_from_xml) # timestamp value should be near the current local time self.assertTrue(time_delta < datetime.timedelta(seconds=600), 'time_delta is %s' % time_delta) actual.unlink() def testDefaultOutputFile(self): """ Confirms that Google Test produces an XML output file with the expected default name if no name is explicitly specified. """ output_file = os.path.join(gtest_test_utils.GetTempDir(), GTEST_DEFAULT_OUTPUT_FILE) gtest_prog_path = gtest_test_utils.GetTestExecutablePath( 'gtest_no_test_unittest') try: os.remove(output_file) except OSError, e: if e.errno != errno.ENOENT: raise p = gtest_test_utils.Subprocess( [gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG], working_dir=gtest_test_utils.GetTempDir()) self.assert_(p.exited) self.assertEquals(0, p.exit_code) self.assert_(os.path.isfile(output_file)) def testSuppressedXmlOutput(self): """ Tests that no XML file is generated if the default XML listener is shut down before RUN_ALL_TESTS is invoked. """ xml_path = os.path.join(gtest_test_utils.GetTempDir(), GTEST_PROGRAM_NAME + 'out.xml') if os.path.isfile(xml_path): os.remove(xml_path) command = [GTEST_PROGRAM_PATH, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path), '--shut_down_xml'] p = gtest_test_utils.Subprocess(command) if p.terminated_by_signal: # p.signal is avalable only if p.terminated_by_signal is True. self.assertFalse( p.terminated_by_signal, '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal)) else: self.assert_(p.exited) self.assertEquals(1, p.exit_code, "'%s' exited with code %s, which doesn't match " 'the expected exit code %s.' 
% (command, p.exit_code, 1)) self.assert_(not os.path.isfile(xml_path)) def testFilteredTestXmlOutput(self): """Verifies XML output when a filter is applied. Runs a test program that executes only some tests and verifies that non-selected tests do not show up in the XML output. """ self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0, extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG]) def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code): """ Returns the xml output generated by running the program gtest_prog_name. Furthermore, the program's exit code must be expected_exit_code. """ xml_path = os.path.join(gtest_test_utils.GetTempDir(), gtest_prog_name + 'out.xml') gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name) command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] + extra_args) p = gtest_test_utils.Subprocess(command) if p.terminated_by_signal: self.assert_(False, '%s was killed by signal %d' % (gtest_prog_name, p.signal)) else: self.assert_(p.exited) self.assertEquals(expected_exit_code, p.exit_code, "'%s' exited with code %s, which doesn't match " 'the expected exit code %s.' % (command, p.exit_code, expected_exit_code)) actual = minidom.parse(xml_path) return actual def _TestXmlOutput(self, gtest_prog_name, expected_xml, expected_exit_code, extra_args=None): """ Asserts that the XML document generated by running the program gtest_prog_name matches expected_xml, a string containing another XML document. Furthermore, the program's exit code must be expected_exit_code. """ actual = self._GetXmlOutput(gtest_prog_name, extra_args or [], expected_exit_code) expected = minidom.parseString(expected_xml) self.NormalizeXml(actual.documentElement) self.AssertEquivalentNodes(expected.documentElement, actual.documentElement) expected.unlink() actual.unlink() if __name__ == '__main__': os.environ['GTEST_STACK_TRACE_DEPTH'] = '1' gtest_test_utils.Main()
BorisJeremic/Real-ESSI-Examples
refs/heads/master
parallel/test_cases/8NodeBrick/cantilever_different_Poisson/NumberOfDivision4/PoissonRatio0.35/extract_numerical_solution.py
170
#!/usr/bin/python
# Extract the maximum nodal displacement from a set of Real-ESSI HDF5 output
# files (given on the command line) and write it to numeric_result.txt.
import h5py
import sys
import numpy as np
import os
import re
import random

# find the path to my own python function:
cur_dir = os.getcwd()
sep = 'test_cases'
test_DIR = cur_dir.split(sep, 1)[0]
scriptDIR = test_DIR + 'compare_function'
sys.path.append(scriptDIR)

# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *

h5_result_files = sys.argv[1:]
# h5_result_file = 't_Fz.h5.feioutput'

max_disp = 0
for result_file in h5_result_files:  # renamed: 'file' shadowed the builtin
    try:
        disp_at_target_point = find_max_disp(result_file, 1)
    except KeyError:
        # This file lacks the expected dataset: skip it entirely.  The old
        # code fell through and re-compared the *previous* file's value,
        # which was a harmless no-op but misleading.
        continue
    if max_disp < disp_at_target_point:
        max_disp = disp_at_target_point

# Write results to file (context manager guarantees the handle is closed
# even if the write fails).
with open('numeric_result.txt', 'w') as f:
    f.write('%.3e' % max_disp)
kmoocdev2/edx-platform
refs/heads/real_2019
common/djangoapps/entitlements/migrations/0010_backfill_refund_lock.py
13
# -*- coding: utf-8 -*- from __future__ import unicode_literals from datetime import datetime from django.db import migrations, models def backfill_refundability(apps, schema_editor): CourseEntitlementSupportDetail = apps.get_model('entitlements', 'CourseEntitlementSupportDetail') for support_detail in CourseEntitlementSupportDetail.objects.all().select_related('entitlement'): support_detail.entitlement.refund_locked = True support_detail.entitlement.save() def revert_backfill(apps, schema_editor): CourseEntitlementSupportDetail = apps.get_model('entitlements', 'CourseEntitlementSupportDetail') for support_detail in CourseEntitlementSupportDetail.objects.all().select_related('entitlement'): support_detail.entitlement.refund_locked = False support_detail.entitlement.save() class Migration(migrations.Migration): dependencies = [ ('entitlements', '0009_courseentitlement_refund_locked'), ] operations = [ migrations.RunPython(backfill_refundability, revert_backfill), ]
jandom/rdkit
refs/heads/master
rdkit/sping/PS/latin1MetricsCache.py
12
FontWidths = { 'courier': [600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600], 'courier-bold': [600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600], 'courier-oblique': [600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600], 'courier-boldoblique': [600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 
600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600, 600], 'helvetica': [278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 355, 556, 556, 889, 667, 191, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 278, 278, 584, 584, 584, 556, 1015, 667, 667, 722, 722, 667, 611, 778, 722, 278, 500, 667, 556, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667, 667, 611, 278, 278, 278, 469, 556, 333, 556, 556, 500, 556, 556, 278, 556, 556, 222, 222, 500, 222, 833, 556, 556, 556, 556, 333, 500, 278, 556, 500, 722, 500, 500, 500, 334, 260, 334, 584, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 
278, 278, 278, 278, 278, 278, 278, 278, 333, 556, 556, 556, 556, 260, 556, 333, 737, 370, 556, 584, 333, 737, 333, 400, 584, 333, 333, 333, 556, 537, 278, 333, 333, 365, 556, 834, 834, 834, 611, 667, 667, 667, 667, 667, 667, 1000, 722, 667, 667, 667, 667, 278, 278, 278, 278, 722, 722, 778, 778, 778, 778, 778, 584, 778, 722, 722, 722, 722, 667, 667, 611, 556, 556, 556, 556, 556, 556, 889, 500, 556, 556, 556, 556, 278, 278, 278, 278, 556, 556, 556, 556, 556, 556, 556, 584, 611, 556, 556, 556, 556, 500, 556, 500], 'helvetica-bold': [833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 278, 333, 474, 556, 556, 889, 722, 238, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 333, 333, 584, 584, 584, 611, 975, 722, 722, 722, 722, 667, 611, 778, 722, 278, 556, 722, 611, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667, 667, 611, 333, 278, 333, 584, 556, 333, 556, 611, 556, 611, 556, 333, 611, 611, 278, 278, 556, 278, 889, 611, 611, 611, 611, 389, 556, 333, 611, 556, 778, 556, 556, 500, 389, 280, 389, 584, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 278, 333, 556, 556, 556, 556, 280, 556, 333, 737, 370, 556, 584, 333, 737, 333, 400, 584, 333, 333, 333, 611, 556, 278, 333, 333, 365, 556, 834, 834, 834, 611, 722, 722, 722, 722, 722, 722, 1000, 722, 667, 667, 667, 667, 278, 278, 278, 278, 722, 722, 778, 778, 778, 778, 778, 584, 778, 722, 722, 722, 722, 667, 667, 611, 556, 556, 556, 556, 556, 556, 889, 556, 556, 556, 556, 556, 278, 278, 278, 278, 611, 611, 611, 611, 611, 611, 611, 584, 611, 611, 611, 611, 611, 556, 611, 556], 'helvetica-oblique': [833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 
278, 278, 355, 556, 556, 889, 667, 191, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 278, 278, 584, 584, 584, 556, 1015, 667, 667, 722, 722, 667, 611, 778, 722, 278, 500, 667, 556, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667, 667, 611, 278, 278, 278, 469, 556, 333, 556, 556, 500, 556, 556, 278, 556, 556, 222, 222, 500, 222, 833, 556, 556, 556, 556, 333, 500, 278, 556, 500, 722, 500, 500, 500, 334, 260, 334, 584, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 278, 333, 556, 556, 556, 556, 260, 556, 333, 737, 370, 556, 584, 333, 737, 333, 400, 584, 333, 333, 333, 556, 537, 278, 333, 333, 365, 556, 834, 834, 834, 611, 667, 667, 667, 667, 667, 667, 1000, 722, 667, 667, 667, 667, 278, 278, 278, 278, 722, 722, 778, 778, 778, 778, 778, 584, 778, 722, 722, 722, 722, 667, 667, 611, 556, 556, 556, 556, 556, 556, 889, 500, 556, 556, 556, 556, 278, 278, 278, 278, 556, 556, 556, 556, 556, 556, 556, 584, 611, 556, 556, 556, 556, 500, 556, 500], 'helvetica-boldoblique': [ 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 278, 333, 474, 556, 556, 889, 722, 238, 333, 333, 389, 584, 278, 333, 278, 278, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 333, 333, 584, 584, 584, 611, 975, 722, 722, 722, 722, 667, 611, 778, 722, 278, 556, 722, 611, 833, 722, 778, 667, 778, 722, 667, 611, 722, 667, 944, 667, 667, 611, 333, 278, 333, 584, 556, 333, 556, 611, 556, 611, 556, 333, 611, 611, 278, 278, 556, 278, 889, 611, 611, 611, 611, 389, 556, 333, 611, 556, 778, 556, 556, 500, 389, 280, 389, 584, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 278, 333, 556, 556, 556, 556, 280, 556, 333, 737, 
370, 556, 584, 333, 737, 333, 400, 584, 333, 333, 333, 611, 556, 278, 333, 333, 365, 556, 834, 834, 834, 611, 722, 722, 722, 722, 722, 722, 1000, 722, 667, 667, 667, 667, 278, 278, 278, 278, 722, 722, 778, 778, 778, 778, 778, 584, 778, 722, 722, 722, 722, 667, 667, 611, 556, 556, 556, 556, 556, 556, 889, 556, 556, 556, 556, 556, 278, 278, 278, 278, 611, 611, 611, 611, 611, 611, 611, 584, 611, 611, 611, 611, 611, 556, 611, 556 ], 'times-roman': [ 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 250, 333, 408, 500, 500, 833, 778, 180, 333, 333, 500, 564, 250, 333, 250, 278, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 278, 278, 564, 564, 564, 444, 921, 722, 667, 667, 722, 611, 556, 722, 722, 333, 389, 722, 611, 889, 722, 722, 556, 722, 667, 556, 611, 722, 722, 944, 722, 722, 611, 333, 278, 333, 469, 500, 333, 444, 500, 444, 500, 444, 333, 500, 500, 278, 278, 500, 278, 778, 500, 500, 500, 500, 333, 389, 278, 500, 500, 722, 500, 500, 444, 480, 200, 480, 541, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 250, 333, 500, 500, 500, 500, 200, 500, 333, 760, 276, 500, 564, 333, 760, 333, 400, 564, 300, 300, 333, 500, 453, 250, 333, 300, 310, 500, 750, 750, 750, 444, 722, 722, 722, 722, 722, 722, 889, 667, 611, 611, 611, 611, 333, 333, 333, 333, 722, 722, 722, 722, 722, 722, 722, 564, 722, 722, 722, 722, 722, 722, 556, 500, 444, 444, 444, 444, 444, 444, 667, 444, 444, 444, 444, 444, 278, 278, 278, 278, 500, 500, 500, 500, 500, 500, 500, 564, 500, 500, 500, 500, 500, 500, 500, 500 ], 'times-bold': [944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 250, 333, 555, 500, 500, 1000, 833, 278, 333, 333, 500, 570, 250, 333, 250, 278, 500, 500, 
500, 500, 500, 500, 500, 500, 500, 500, 333, 333, 570, 570, 570, 500, 930, 722, 667, 722, 722, 667, 611, 778, 778, 389, 500, 778, 667, 944, 722, 778, 611, 778, 722, 556, 667, 722, 722, 1000, 722, 722, 667, 333, 278, 333, 581, 500, 333, 500, 556, 444, 556, 444, 333, 500, 556, 278, 333, 556, 278, 833, 556, 500, 556, 556, 444, 389, 333, 556, 500, 722, 500, 500, 444, 394, 220, 394, 520, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 944, 250, 333, 500, 500, 500, 500, 220, 500, 333, 747, 300, 500, 570, 333, 747, 333, 400, 570, 300, 300, 333, 556, 540, 250, 333, 300, 330, 500, 750, 750, 750, 500, 722, 722, 722, 722, 722, 722, 1000, 722, 667, 667, 667, 667, 389, 389, 389, 389, 722, 722, 778, 778, 778, 778, 778, 570, 778, 722, 722, 722, 722, 722, 611, 556, 500, 500, 500, 500, 500, 500, 722, 444, 444, 444, 444, 444, 278, 278, 278, 278, 500, 556, 500, 500, 500, 500, 500, 570, 500, 556, 556, 556, 556, 500, 556, 500], 'times-italic': [833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 250, 333, 420, 500, 500, 833, 778, 214, 333, 333, 500, 675, 250, 333, 250, 278, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 333, 333, 675, 675, 675, 500, 920, 611, 611, 667, 722, 611, 611, 722, 722, 333, 444, 667, 556, 833, 667, 722, 611, 722, 611, 500, 556, 722, 611, 833, 611, 556, 556, 389, 278, 389, 422, 500, 333, 500, 500, 444, 500, 444, 278, 500, 500, 278, 278, 444, 278, 722, 500, 500, 500, 500, 389, 389, 278, 500, 444, 667, 444, 444, 389, 400, 275, 400, 541, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 833, 250, 389, 500, 500, 500, 500, 275, 500, 333, 760, 276, 500, 675, 333, 760, 333, 400, 675, 300, 300, 333, 500, 523, 250, 333, 300, 310, 500, 750, 750, 
750, 500, 611, 611, 611, 611, 611, 611, 889, 667, 611, 611, 611, 611, 333, 333, 333, 333, 722, 667, 722, 722, 722, 722, 722, 675, 722, 722, 722, 722, 722, 556, 611, 500, 500, 500, 500, 500, 500, 500, 667, 444, 444, 444, 444, 444, 278, 278, 278, 278, 500, 500, 500, 500, 500, 500, 500, 675, 500, 500, 500, 500, 500, 444, 500, 444], 'times-bolditalic': [ 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 250, 389, 555, 500, 500, 833, 778, 278, 333, 333, 500, 570, 250, 333, 250, 278, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 333, 333, 570, 570, 570, 500, 832, 667, 667, 667, 722, 667, 667, 722, 778, 389, 500, 667, 611, 889, 722, 722, 611, 722, 667, 556, 611, 722, 667, 889, 667, 611, 611, 333, 278, 333, 570, 500, 333, 500, 500, 444, 500, 444, 333, 500, 556, 278, 278, 500, 278, 778, 556, 500, 500, 500, 389, 389, 278, 556, 444, 667, 500, 444, 389, 348, 220, 348, 570, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 889, 250, 389, 500, 500, 500, 500, 220, 500, 333, 747, 266, 500, 606, 333, 747, 333, 400, 570, 300, 300, 333, 576, 500, 250, 333, 300, 300, 500, 750, 750, 750, 500, 667, 667, 667, 667, 667, 667, 944, 667, 667, 667, 667, 667, 389, 389, 389, 389, 722, 722, 722, 722, 722, 722, 722, 570, 722, 722, 722, 722, 722, 611, 611, 500, 500, 500, 500, 500, 500, 500, 722, 444, 444, 444, 444, 444, 278, 278, 278, 278, 500, 556, 500, 500, 500, 500, 500, 570, 500, 556, 556, 556, 556, 444, 500, 444 ], 'symbol': [ 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 333, 250, 500, 250, 833, 778, 250, 333, 333, 250, 549, 250, 250, 250, 278, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 278, 278, 549, 549, 549, 444, 250, 250, 250, 250, 250, 250, 
250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 333, 250, 333, 250, 500, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 480, 200, 480, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 713, 250, 250, 250, 400, 549, 250, 250, 250, 576, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 549, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 549, 250, 250, 250, 250, 250, 250, 250, 250 ], 'zapfdingbats': [278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 
278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278] }
dataxu/ansible
refs/heads/dx-stable-2.5
lib/ansible/modules/network/aci/aci_tenant_ep_retention_policy.py
14
#!/usr/bin/python
# -*- coding: utf-8 -*-

# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: aci_tenant_ep_retention_policy
short_description: Manage End Point (EP) retention protocol policies (fv:EpRetPol)
description:
- Manage End Point (EP) retention protocol policies on Cisco ACI fabrics.
notes:
- The C(tenant) used must exist before using this module in your playbook.
  The M(aci_tenant) module can be used for this.
- More information about the internal APIC class B(fv:EpRetPol) from
  L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Swetha Chunduri (@schunduri)
version_added: '2.4'
options:
  tenant:
    description:
    - The name of an existing tenant.
    aliases: [ tenant_name ]
  epr_policy:
    description:
    - The name of the end point retention policy.
    aliases: [ epr_name, name ]
  bounce_age:
    description:
    - Bounce Entry Aging Interval (range 150secs - 65535secs)
    - 0 is used for infinite.
    default: 630
  bounce_trigger:
    description:
    - Determines if the bounce entries are installed by RARP Flood or COOP Protocol.
    - The APIC defaults new End Point Retention Policies to C(coop).
    default: coop
  hold_interval:
    description:
    - Hold Interval (range 5secs - 65535secs).
    default: 300
  local_ep_interval:
    description:
    - Local end point Aging Interval (range 120secs - 65535secs).
    - 0 is used for infinite.
    default: 900
  remote_ep_interval:
    description:
    - Remote end point Aging Interval (range 120secs - 65535secs).
    - 0 is used for infinite.
    default: 300
  move_frequency:
    description:
    - Move frequency per second (range 0secs - 65535secs).
    - 0 is used for none.
    default: 256
  description:
    description:
    - Description for the End point retention policy.
    aliases: [ descr ]
  state:
    description:
    - Use C(present) or C(absent) for adding or removing.
    - Use C(query) for listing an object or multiple objects.
    choices: [ absent, present, query ]
    default: present
extends_documentation_fragment: aci
'''

EXAMPLES = r'''
- name: Add a new EPR policy
  aci_epr_policy:
    host: apic
    username: admin
    password: SomeSecretPassword
    tenant: production
    epr_policy: EPRPol1
    bounce_age: 630
    hold_interval: 300
    local_ep_interval: 900
    remote_ep_interval: 300
    move_frequency: 256
    description: test
    state: present

- name: Remove an EPR policy
  aci_epr_policy:
    host: apic
    username: admin
    password: SomeSecretPassword
    tenant: production
    epr_policy: EPRPol1
    state: absent

- name: Query an EPR policy
  aci_epr_policy:
    host: apic
    username: admin
    password: SomeSecretPassword
    tenant: production
    epr_policy: EPRPol1
    state: query

- name: Query all EPR policies
  aci_epr_policy:
    host: apic
    username: admin
    password: SomeSecretPassword
    state: query
'''

RETURN = r'''
current:
  description: The existing configuration from the APIC after the module has finished
  returned: success
  type: list
  sample:
    [
        {
            "fvTenant": {
                "attributes": {
                    "descr": "Production environment",
                    "dn": "uni/tn-production",
                    "name": "production",
                    "nameAlias": "",
                    "ownerKey": "",
                    "ownerTag": ""
                }
            }
        }
    ]
error:
  description: The error information as returned from the APIC
  returned: failure
  type: dict
  sample:
    {
        "code": "122",
        "text": "unknown managed object class foo"
    }
raw:
  description: The raw output returned by the APIC REST API (xml or json)
  returned: parse error
  type: string
  sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
  description: The actual/minimal configuration pushed to the APIC
  returned: info
  type: list
  sample:
    {
        "fvTenant": {
            "attributes": {
                "descr": "Production environment"
            }
        }
    }
previous:
  description: The original configuration from the APIC before the module has started
  returned: info
  type: list
  sample:
    [
        {
            "fvTenant": {
                "attributes": {
                    "descr": "Production",
                    "dn": "uni/tn-production",
                    "name": "production",
                    "nameAlias": "",
                    "ownerKey": "",
                    "ownerTag": ""
                }
            }
        }
    ]
proposed:
  description: The assembled configuration from the user-provided parameters
  returned: info
  type: dict
  sample:
    {
        "fvTenant": {
            "attributes": {
                "descr": "Production environment",
                "name": "production"
            }
        }
    }
filter_string:
  description: The filter string used for the request
  returned: failure or debug
  type: string
  sample: ?rsp-prop-include=config-only
method:
  description: The HTTP method used for the request to the APIC
  returned: failure or debug
  type: string
  sample: POST
response:
  description: The HTTP response from the APIC
  returned: failure or debug
  type: string
  sample: OK (30 bytes)
status:
  description: The HTTP status from the APIC
  returned: failure or debug
  type: int
  sample: 200
url:
  description: The HTTP url used for the request to the APIC
  returned: failure or debug
  type: string
  sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''

from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule

# Maps the module's user-facing bounce_trigger choices onto the APIC fv:EpRetPol
# bounceTrig attribute values.
# FIX: keys must match the declared choices ['coop', 'flood']; previously the
# mapping used the key 'rarp', so selecting the documented 'flood' choice
# raised a KeyError instead of being translated to 'rarp-flood'.
BOUNCE_TRIG_MAPPING = dict(coop='protocol', flood='rarp-flood')


def main():
    """Entry point: validate parameters and create/delete/query an fv:EpRetPol object."""
    argument_spec = aci_argument_spec()
    argument_spec.update(
        tenant=dict(type='str', aliases=['tenant_name']),  # Not required for querying all objects
        epr_policy=dict(type='str', aliases=['epr_name', 'name']),
        bounce_age=dict(type='int'),
        bounce_trigger=dict(type='str', choices=['coop', 'flood']),
        hold_interval=dict(type='int'),
        local_ep_interval=dict(type='int'),
        remote_ep_interval=dict(type='int'),
        description=dict(type='str', aliases=['descr']),
        move_frequency=dict(type='int'),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'),  # Deprecated starting from v2.6
        protocol=dict(type='str', removed_in_version='2.6'),  # Deprecated in v2.6
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['epr_policy', 'tenant']],
            ['state', 'present', ['epr_policy', 'tenant']],
        ],
    )

    epr_policy = module.params['epr_policy']

    # For each interval parameter: validate the documented range, then translate
    # the sentinel value 0 into the keyword the APIC expects ('infinite'/'none').
    bounce_age = module.params['bounce_age']
    if bounce_age is not None and bounce_age != 0 and bounce_age not in range(150, 65536):
        module.fail_json(msg="The bounce_age must be a value of 0 or between 150 and 65535")
    if bounce_age == 0:
        bounce_age = 'infinite'

    bounce_trigger = module.params['bounce_trigger']
    if bounce_trigger is not None:
        bounce_trigger = BOUNCE_TRIG_MAPPING[bounce_trigger]

    description = module.params['description']

    hold_interval = module.params['hold_interval']
    if hold_interval is not None and hold_interval not in range(5, 65536):
        module.fail_json(msg="The hold_interval must be a value between 5 and 65535")

    local_ep_interval = module.params['local_ep_interval']
    if local_ep_interval is not None and local_ep_interval != 0 and local_ep_interval not in range(120, 65536):
        module.fail_json(msg="The local_ep_interval must be a value of 0 or between 120 and 65535")
    if local_ep_interval == 0:
        local_ep_interval = "infinite"

    move_frequency = module.params['move_frequency']
    if move_frequency is not None and move_frequency not in range(65536):
        module.fail_json(msg="The move_frequency must be a value between 0 and 65535")
    if move_frequency == 0:
        move_frequency = "none"

    remote_ep_interval = module.params['remote_ep_interval']
    # FIX: exempt 0 ('infinite') from the range check, mirroring local_ep_interval;
    # previously 0 was rejected even though the docs and the message allow it.
    if remote_ep_interval is not None and remote_ep_interval != 0 and remote_ep_interval not in range(120, 65536):
        module.fail_json(msg="The remote_ep_interval must be a value of 0 or between 120 and 65535")
    if remote_ep_interval == 0:
        remote_ep_interval = "infinite"

    state = module.params['state']
    tenant = module.params['tenant']

    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
            module_object=tenant,
        ),
        subclass_1=dict(
            aci_class='fvEpRetPol',
            aci_rn='epRPol-{0}'.format(epr_policy),
            filter_target='eq(fvEpRetPol.name, "{0}")'.format(epr_policy),
            module_object=epr_policy,
        ),
    )

    aci.get_existing()

    if state == 'present':
        aci.payload(
            aci_class='fvEpRetPol',
            class_config=dict(
                name=epr_policy,
                descr=description,
                bounceAgeIntvl=bounce_age,
                bounceTrig=bounce_trigger,
                holdIntvl=hold_interval,
                localEpAgeIntvl=local_ep_interval,
                remoteEpAgeIntvl=remote_ep_interval,
                moveFreq=move_frequency,
            ),
        )

        aci.get_diff(aci_class='fvEpRetPol')

        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    aci.exit_json()


if __name__ == "__main__":
    main()
knxd/PyKNyX
refs/heads/master
tests/core/dptXlator/dptXlatorFactory.py
2
# -*- coding: utf-8 -*-

from pyknyx.core.dptXlator.dptXlatorFactory import *
import unittest

# Silence the pyknyx logger so the test run stays quiet.
from pyknyx.services.logger import logging
logger = logging.getLogger(__name__)
logging.getLogger("pyknyx").setLevel(logging.ERROR)


class DPTMainTypeMapperTestCase(unittest.TestCase):
    """Tests for the DPTMainTypeMapper constructor."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_constructor(self):
        # A plain class (not a DPT xlator) must be rejected with
        # DPTXlatorValueError, while a real xlator class is accepted.
        class NotAnXlator(object):
            pass

        with self.assertRaises(DPTXlatorValueError):
            DPTMainTypeMapper("1.xxx", NotAnXlator, "Dummy")
        DPTMainTypeMapper("1.xxx", DPTXlatorBoolean, "Dummy")


class DPTXlatorFactoryObjectTestCase(unittest.TestCase):
    """Tests for the DPTXlatorFactory object (placeholder for now)."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    #def test_constructor(self):
        #print DPTXlatorFactory().handledMainDPTIDs
ddietze/pyFSRS
refs/heads/master
available_modules/Devices/dummyValveY.py
2
""" .. module: dummyValveY :platform: Windows .. moduleauthor:: Scott Ellis skellis@berkeley.edu> dummyAxis provides a dummy axis device for testing purposes. You can use this file as a starting point when writing your own axis device module for pyFSRS. .. This file is part of the pyFSRS app. This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. Copyright 2014-2016 Daniel Dietze <daniel.dietze@berkeley.edu>. """ # base class import core.FSRSModule as module def howMany(): return 1 class dummyValveY(module.Valve): """A prototype for an axis module for pyFSRS. 
""" def __init__(self): module.Valve.__init__(self) self.name = "Dummy Valve Y" prop = [] prop.append({"label": "Valve", "type": "label", "value": ""}) prop.append({"label": "Position", "type": "input", "value": "0.0", "event": "onMove"}) prop.append({"label": "Speed", "type": "choice", "value": 0, "choices": ["fast", "slow"], "event": None}) # convert dictionary to properties object self.parsePropertiesDict(prop) def initialize(self, others=[]): count = module.Valve.initialize(self, others) self.getPropertyByLabel("valve").setValue("#" + str(count + 1)) # return current position def pos(self): return self.position # goto new position def goto(self, pos): self.position = pos #print "\rValve"+ self.getPropertyByLabel("valve").getValue()+"moved to", pos, "\t" # should return True if stage is still moving def is_moving(self): return False def onMove(self, event): pos = float(self.getPropertyByLabel("position").getValue()) self.goto(pos)
DinoV/PTVS
refs/heads/master
Python/Product/Pyvot/Pyvot/xl/_impl/table.py
7
# PyVot
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.

"""Table abstraction over Excel ListObject and AutoFilter tables (COM interop)."""

import xl._impl.com_utils as com_utils
from xl.cache import CacheManager, cache_result, enable_caching
from xl.range import Range

# Table abstraction. Provides a uniform abstraction over Excel concepts:
# - Excel ListObject (Ctrl+T), 1st class tables
# - Excel AutoFilters, but aren't 1st class objects.
#
# Services provided:
# - header (column names)
# - data ranges.
# - visibility and rows
# - better support for adding new computed columns


class Table(object):
    """Uniform wrapper over an Excel ListObject table or an AutoFilter region.

    Holds a header Range (may be None) and a data Range; `from_auto_filter`
    records which Excel representation backs this table.
    """

    def __init__(self, name, rHeader, rData, from_auto_filter=False):
        self.rHeader = rHeader  # may be null
        self.rData = rData
        self._name = name
        self._from_auto_filter = from_auto_filter
        # Header and data ranges must be disjoint by construction.
        if (rHeader != None):
            assert not rHeader.intersects(rData)

    @cache_result
    @enable_caching
    def _getTableColumn(self, name):
        """Returns a Range for the data in the given column name. None if no column.

        Column-name comparison is case-insensitive; returns None when the table
        has no header row or no header cell matches.
        """
        if self.rHeader == None:
            return None
        name = name.lower()
        for idx, header in enumerate(self.rHeader):
            if header is None:
                continue  # Header cells can be empty
            if header.lower() == name:
                return self.rData.column_vector(idx)

    # get total number of rows in the table.
    def getRowCount(self):
        return self.rData.num_rows

    def getVisibleRowCount(self):
        """Return the number of data rows not hidden by filtering."""
        return self.rData.getVisibleRowCount()

    @property
    def data_rows(self):
        """Returns a list of data rows in the table. Each row is a list of values"""
        # A small rData may have a vector or scalar shape. However, we wish to
        # always return a list of lists
        return self.rData.as_matrix.get()

    @cache_result
    @property
    def table_range(self):
        """The full Range of this table; encompasses headers (if any) as well as data"""
        assert not self.rData is None
        app = self.rData._full_xlRange.Application
        if self.rHeader is None:
            return self.rData
        return Range(app.Union(self.rData._full_xlRange, self.rHeader._full_xlRange), with_hidden=False)

    def Name(self):
        """Return the table's display name (set at construction)."""
        return self._name

    def append_empty_columns(self, num_new_cols):
        """Appends the specified number of columns to the right of this table.

        The columns are empty, except for the possibility of Excel-generated default
        column headers. The inserted range, including headers, is returned"""
        # We assume below that at least one column is added
        # $$$ Decide how to represent empty Ranges()
        if num_new_cols == 0:
            return None
        adjacent = self._adjacent_column_range(num_new_cols)
        self._reserve_column_space(adjacent)
        # The insert has helpfully updated xlRanges from underneath us. That is,
        # adjacent has shifted by num_new_cols
        adjacent = self._adjacent_column_range(num_new_cols)
        # AutoFilter tables are hard to extend, but easy to promote to a 'real' table
        if self._from_auto_filter:
            self._convert_to_listobject_table()
        # For ListObject tables, putting a value in a column header triggers table-ification magic
        # Removing the value generates a default column name. Neat.
        # This accomplishes nothing if this is an AutoFilter table
        # $$$ update this when slicing is added
        adj_header_range = Range(adjacent._full_xlRange.Rows(1), with_hidden=True)
        adj_header_range.set( [u" "] * num_new_cols )
        adj_header_range.set( [u""] * num_new_cols )
        # adjacent is now a subset of the inserted empty space
        # However, this instance's rData and rHeader attributes are now out of date
        # We have been possibly using hidden cells above, but want to return a safer range to users
        # $$$ investigate if updating rData / rHeader is vital
        return adjacent.excluding_hidden

    def _adjacent_column_range(self, num_cols):
        """Returns a num_cols-wide range right-adjacent to this table.

        The range shares the same height, incl. the header row if applicable.
        This does not modify the worksheet. The returned range includes hidden cells."""
        # $$$ update this when slicing is added
        # We remove filtering here, because we should insert after any hidden cols
        full_table = self.table_range.including_hidden
        last_existing_col = Range(full_table._full_xlRange.Columns(full_table.num_columns), with_hidden=True)
        # first_new_col_xlRange = last_existing_col_xlRange._offset_unfiltered(0, 1)
        first_new_col = last_existing_col._offset_unfiltered(cols=1)
        # Add additional columns beyond the first
        new_cols = first_new_col._adjust_unfiltered_size(cols=num_cols - 1)
        return new_cols

    def _reserve_column_space(self, range):
        """Reserve at least the requested range for new Table columns.

        The given range is assumed to be adjacent (on the right) of this Table.
        If unable to insert the given range, (e.g. because it would break a table
        further to the right), full (worksheet) columns are inserted instead."""
        # Inserting cells shifts ranges on the sheet, so any cached Range data is stale.
        CacheManager.invalidate_all_caches()
        # xlFormatFromLeftOrAbove encourages consistent formatting with the original table (to the left)
        try:
            range._full_xlRange.Insert(CopyOrigin=com_utils.constants.xlFormatFromLeftOrAbove,
                                       Shift=com_utils.constants.xlToRight)
        except com_utils.com_error:
            # Oops, insert failed. This is probably because Excel is refusing to break a right-adjacent table
            # We try again, inserting a whole column. This also breaks things in many cases, but at Excel doesn't complain
            range._full_xlRange.EntireColumn.Insert(CopyOrigin=com_utils.constants.xlFormatFromLeftOrAbove,
                                                    Shift=com_utils.constants.xlToRight)

    def _convert_to_listobject_table(self):
        """Converts this Table's underlying Excel representation to an Excel ListObject

        This operation can only be applied to Tables backed by a sheet AutoFilter
        (see tableFromAutoFilter). AutoFilter state is preserved - i.e., visible
        rows will not change."""
        assert self._from_auto_filter, "already a ListObject table"
        xlWorksheet = self.rData._full_xlRange.Worksheet
        xlWorksheet.ListObjects.Add(SourceType=com_utils.constants.xlSrcRange, Source=self.table_range._full_xlRange)
        self._from_auto_filter = False


def tableFromListObject(xlListObject):
    """Given an ListObject, return a Table abstraction"""
    # See more about ListObjects: http://msdn.microsoft.com/en-us/library/microsoft.office.interop.excel.listobject_members.aspx
    rHeader = Range(xlListObject.HeaderRowRange, with_hidden=False)
    rData = Range(xlListObject.DataBodyRange, with_hidden=False)
    return Table(xlListObject.Name, rHeader, rData, from_auto_filter=False)


def tableFromAutoFilter(xlSheet):
    """Each excel sheet can have 1 auto-filter. Return it if present. Else return None."""
    a = xlSheet.AutoFilter
    if a == None:
        return None  # no autofilter on this sheet
    # We have to manually split out the header and range.
    r = a.Range
    # In certain peculiar cases, Worksheet.AutoFilter is set, but
    # actually refers to a ListObject table. See excel_issues.py
    if r.ListObject != None:
        return None
    # First row of the AutoFilter range is the header; the rest is data.
    (r1, c1, r2, c2) = _getBounds(r)
    rHeader = Range(xlSheet.Range(xlSheet.Cells(r1, c1), xlSheet.Cells(r1, c2)), with_hidden=False)
    rData = Range(xlSheet.Range(xlSheet.Cells(r1 + 1, c1), xlSheet.Cells(r2, c2)), with_hidden=False)
    return Table("AutoFilter " + xlSheet.Name, rHeader, rData, from_auto_filter=True)


# Given an xlRange, get the (1-based) row, column bounds for the range.
def _getBounds(xlRange):
    x = xlRange.Columns
    c1 = x(1).Column
    c2 = x(len(x)).Column
    y = xlRange.Rows
    r1 = y(1).Row
    r2 = y(len(y)).Row
    return (r1, c1, r2, c2)
hyperized/ansible
refs/heads/devel
contrib/inventory/collins.py
37
#!/usr/bin/env python """ Collins external inventory script ================================= Ansible has a feature where instead of reading from /etc/ansible/hosts as a text file, it can query external programs to obtain the list of hosts, groups the hosts are in, and even variables to assign to each host. Collins is a hardware asset management system originally developed by Tumblr for tracking new hardware as it built out its own datacenters. It exposes a rich API for manipulating and querying one's hardware inventory, which makes it an ideal 'single point of truth' for driving systems automation like Ansible. Extensive documentation on Collins, including a quickstart, API docs, and a full reference manual, can be found here: http://tumblr.github.io/collins This script adds support to Ansible for obtaining a dynamic inventory of assets in your infrastructure, grouping them in Ansible by their useful attributes, and binding all facts provided by Collins to each host so that they can be used to drive automation. Some parts of this script were cribbed shamelessly from mdehaan's Cobbler inventory script. To use it, copy it to your repo and pass -i <collins script> to the ansible or ansible-playbook command; if you'd like to use it by default, simply copy collins.ini to /etc/ansible and this script to /etc/ansible/hosts. 
Alongside the options set in collins.ini, there are several environment variables that will be used instead of the configured values if they are set: - COLLINS_USERNAME - specifies a username to use for Collins authentication - COLLINS_PASSWORD - specifies a password to use for Collins authentication - COLLINS_ASSET_TYPE - specifies a Collins asset type to use during querying; this can be used to run Ansible automation against different asset classes than server nodes, such as network switches and PDUs - COLLINS_CONFIG - specifies an alternative location for collins.ini, defaults to <location of collins.py>/collins.ini If errors are encountered during operation, this script will return an exit code of 255; otherwise, it will return an exit code of 0. Collins attributes are accessible as variables in ansible via the COLLINS['attribute_name']. Tested against Ansible 1.8.2 and Collins 1.3.0. """ # (c) 2014, Steve Salevan <steve.salevan@gmail.com> # # This file is part of Ansible. # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
######################################################################


import argparse
import logging
import os
import re
import sys
from time import time
import traceback

import json

from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import configparser as ConfigParser
from ansible.module_utils.six.moves.urllib.parse import urlencode

from ansible.module_utils.urls import open_url


class CollinsDefaults(object):
    # URL template for the Collins asset search API.
    ASSETS_API_ENDPOINT = '%s/api/assets'
    # Built-in asset keys that are handled specially (STATE gets its own
    # grouping logic) and are excluded from generic attribute indexing.
    SPECIAL_ATTRIBUTES = set([
        'CREATED',
        'DELETED',
        'UPDATED',
        'STATE',
    ])
    LOG_FORMAT = '%(asctime)-15s %(message)s'


class Error(Exception):
    """Base class for errors raised by this inventory script."""
    pass


class MaxRetriesError(Error):
    """Raised when Collins queries keep failing past the retry budget."""
    pass


class CollinsInventory(object):

    def __init__(self):
        """ Constructs CollinsInventory object and reads all configuration. """
        self.inventory = dict()  # A list of groups and the hosts in that group
        self.cache = dict()  # Details about hosts in the inventory

        # Read settings and parse CLI arguments
        self.read_settings()
        self.parse_cli_args()

        logging.basicConfig(format=CollinsDefaults.LOG_FORMAT,
                            filename=self.log_location)
        self.log = logging.getLogger('CollinsInventory')

    def _asset_get_attribute(self, asset, attrib):
        """ Returns a user-defined attribute from an asset if it exists; otherwise,
            returns None. """
        if 'ATTRIBS' in asset:
            for attrib_block in asset['ATTRIBS'].keys():
                if attrib in asset['ATTRIBS'][attrib_block]:
                    return asset['ATTRIBS'][attrib_block][attrib]
        return None

    def _asset_has_attribute(self, asset, attrib):
        """ Returns whether a user-defined attribute is present on an asset. """
        if 'ATTRIBS' in asset:
            for attrib_block in asset['ATTRIBS'].keys():
                if attrib in asset['ATTRIBS'][attrib_block]:
                    return True
        return False

    def run(self):
        """ Main execution path: refresh or load the cache, then print the
            requested inventory data. Returns True on success. """

        # Updates cache if cache is not present or has expired.
        successful = True
        if self.args.refresh_cache:
            successful = self.update_cache()
        elif not self.is_cache_valid():
            successful = self.update_cache()
        else:
            successful = self.load_inventory_from_cache()
            successful &= self.load_cache_from_cache()

        data_to_print = ""

        # Data to print
        if self.args.host:
            data_to_print = self.get_host_info()
        elif self.args.list:
            # Display list of instances for inventory
            data_to_print = self.json_format_dict(self.inventory, self.args.pretty)
        else:  # default action with no options
            data_to_print = self.json_format_dict(self.inventory, self.args.pretty)

        print(data_to_print)
        return successful

    def find_assets(self, attributes=None, operation='AND'):
        """ Obtains Collins assets matching the provided attributes, paging
            through results and retrying failed requests up to the configured
            maximum. Raises MaxRetriesError when the retry budget is spent. """
        attributes = {} if attributes is None else attributes

        # Formats asset search query to locate assets matching attributes, using
        # the CQL search feature as described here:
        # http://tumblr.github.io/collins/recipes.html
        attributes_query = ['='.join(attr_pair)
                            for attr_pair in iteritems(attributes)]
        query_parameters = {
            'details': ['True'],
            'operation': [operation],
            'query': attributes_query,
            'remoteLookup': [str(self.query_remote_dcs)],
            'size': [self.results_per_query],
            'type': [self.collins_asset_type],
        }
        assets = []
        cur_page = 0
        num_retries = 0
        # Locates all assets matching the provided query, exhausting pagination.
        while True:
            if num_retries == self.collins_max_retries:
                raise MaxRetriesError("Maximum of %s retries reached; giving up" %
                                      self.collins_max_retries)
            query_parameters['page'] = cur_page
            query_url = "%s?%s" % (
                (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),
                urlencode(query_parameters, doseq=True)
            )
            try:
                response = open_url(query_url,
                                    timeout=self.collins_timeout_secs,
                                    url_username=self.collins_username,
                                    url_password=self.collins_password,
                                    force_basic_auth=True)
                json_response = json.loads(response.read())
                # Adds any assets found to the array of assets.
                assets += json_response['data']['Data']
                # If we've retrieved all of our assets, breaks out of the loop.
                if len(json_response['data']['Data']) == 0:
                    break
                cur_page += 1
                num_retries = 0
            except Exception:
                self.log.error("Error while communicating with Collins, retrying:\n%s",
                               traceback.format_exc())
                num_retries += 1
        return assets

    def is_cache_valid(self):
        """ Determines if the cache files have expired, or if it is still valid """

        if os.path.isfile(self.cache_path_cache):
            mod_time = os.path.getmtime(self.cache_path_cache)
            current_time = time()
            if (mod_time + self.cache_max_age) > current_time:
                # Both cache files must exist for the cache to be usable.
                if os.path.isfile(self.cache_path_inventory):
                    return True

        return False

    def read_settings(self):
        """ Reads the settings from the collins.ini file """

        config_loc = os.getenv('COLLINS_CONFIG',
                               os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')

        config = ConfigParser.SafeConfigParser()
        # BUGFIX: read the resolved location. Previously the collins.ini next
        # to this script was always read, silently ignoring a COLLINS_CONFIG
        # override even though config_loc was computed above.
        config.read(config_loc)

        self.collins_host = config.get('collins', 'host')
        self.collins_username = os.getenv('COLLINS_USERNAME',
                                          config.get('collins', 'username'))
        self.collins_password = os.getenv('COLLINS_PASSWORD',
                                          config.get('collins', 'password'))
        self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE',
                                            config.get('collins', 'asset_type'))
        self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
        self.collins_max_retries = config.getint('collins', 'max_retries')

        self.results_per_query = config.getint('collins', 'results_per_query')
        self.ip_address_index = config.getint('collins', 'ip_address_index')
        self.query_remote_dcs = config.getboolean('collins', 'query_remote_dcs')
        self.prefer_hostnames = config.getboolean('collins', 'prefer_hostnames')

        # Cache file locations are namespaced by asset type so different asset
        # classes (servers, switches, ...) do not clobber each other.
        cache_path = config.get('collins', 'cache_path')
        self.cache_path_cache = cache_path + \
            '/ansible-collins-%s.cache' % self.collins_asset_type
        self.cache_path_inventory = cache_path + \
            '/ansible-collins-%s.index' % self.collins_asset_type
        self.cache_max_age = config.getint('collins', 'cache_max_age')

        log_path = config.get('collins', 'log_path')
        self.log_location = log_path + '/ansible-collins.log'

    def parse_cli_args(self):
        """ Command line argument processing """

        parser = argparse.ArgumentParser(
            description='Produces an Ansible Inventory file based on Collins')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to Collins '
                                 '(default: False - use cache files)')
        parser.add_argument('--pretty', action='store_true', default=False,
                            help='Pretty print all JSON output')
        self.args = parser.parse_args()

    def update_cache(self):
        """ Make calls to Collins and saves the output in a cache """

        self.cache = dict()
        self.inventory = dict()

        # Locates all server assets from Collins.
        try:
            server_assets = self.find_assets()
        except Exception:
            self.log.error("Error while locating assets from Collins:\n%s",
                           traceback.format_exc())
            return False

        for asset in server_assets:
            # Determines the index to retrieve the asset's IP address either by
            # an attribute set on the Collins asset or the pre-configured value.
            if self._asset_has_attribute(asset, 'ANSIBLE_IP_INDEX'):
                ip_index = self._asset_get_attribute(asset, 'ANSIBLE_IP_INDEX')
                try:
                    ip_index = int(ip_index)
                except Exception:
                    self.log.error(
                        "ANSIBLE_IP_INDEX attribute on asset %s not an integer: %s",
                        asset, ip_index)
            else:
                ip_index = self.ip_address_index

            asset['COLLINS'] = {}

            # Attempts to locate the asset's primary identifier (hostname or IP
            # address), which will be used to index the asset throughout the
            # Ansible inventory.
            if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
                asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
            elif 'ADDRESSES' not in asset:
                self.log.warning("No IP addresses found for asset '%s', skipping",
                                 asset)
                continue
            elif len(asset['ADDRESSES']) < ip_index + 1:
                self.log.warning(
                    "No IP address found at index %s for asset '%s', skipping",
                    ip_index, asset)
                continue
            else:
                asset_identifier = asset['ADDRESSES'][ip_index]['ADDRESS']

            # Adds an asset index to the Ansible inventory based upon unpacking
            # the name of the asset's current STATE from its dictionary.
            if 'STATE' in asset['ASSET'] and asset['ASSET']['STATE']:
                state_inventory_key = self.to_safe(
                    'STATE-%s' % asset['ASSET']['STATE']['NAME'])
                self.push(self.inventory, state_inventory_key, asset_identifier)

            # Indexes asset by all user-defined Collins attributes.
            if 'ATTRIBS' in asset:
                for attrib_block in asset['ATTRIBS'].keys():
                    for attrib in asset['ATTRIBS'][attrib_block].keys():
                        asset['COLLINS'][attrib] = asset['ATTRIBS'][attrib_block][attrib]
                        attrib_key = self.to_safe(
                            '%s-%s' % (attrib, asset['ATTRIBS'][attrib_block][attrib]))
                        self.push(self.inventory, attrib_key, asset_identifier)

            # Indexes asset by all built-in Collins attributes.
            for attribute in asset['ASSET'].keys():
                if attribute not in CollinsDefaults.SPECIAL_ATTRIBUTES:
                    attribute_val = asset['ASSET'][attribute]
                    if attribute_val is not None:
                        attrib_key = self.to_safe('%s-%s' % (attribute, attribute_val))
                        self.push(self.inventory, attrib_key, asset_identifier)

            # Indexes asset by hardware product information.
            if 'HARDWARE' in asset:
                if 'PRODUCT' in asset['HARDWARE']['BASE']:
                    product = asset['HARDWARE']['BASE']['PRODUCT']
                    if product:
                        product_key = self.to_safe(
                            'HARDWARE-PRODUCT-%s' % asset['HARDWARE']['BASE']['PRODUCT'])
                        self.push(self.inventory, product_key, asset_identifier)

            # Indexing now complete, adds the host details to the asset cache.
            self.cache[asset_identifier] = asset

        try:
            self.write_to_cache(self.cache, self.cache_path_cache)
            self.write_to_cache(self.inventory, self.cache_path_inventory)
        except Exception:
            self.log.error("Error while writing to cache:\n%s",
                           traceback.format_exc())
            return False
        return True

    def push(self, dictionary, key, value):
        """ Adds a value to a list at a dictionary key, creating the list if it
            doesn't exist. """

        if key not in dictionary:
            dictionary[key] = []
        dictionary[key].append(value)

    def get_host_info(self):
        """ Get variables about a specific host. """

        if not self.cache or len(self.cache) == 0:
            # Need to load index from cache
            self.load_cache_from_cache()

        if self.args.host not in self.cache:
            # try updating the cache
            self.update_cache()

            if self.args.host not in self.cache:
                # host might not exist anymore
                return self.json_format_dict({}, self.args.pretty)

        return self.json_format_dict(self.cache[self.args.host], self.args.pretty)

    def load_inventory_from_cache(self):
        """ Reads the index from the cache file and sets self.inventory """

        try:
            # BUGFIX: 'with' guarantees the file handle is closed even when
            # reading or JSON decoding raises (the old code leaked it).
            with open(self.cache_path_inventory, 'r') as cache:
                self.inventory = json.loads(cache.read())
            return True
        except Exception:
            self.log.error("Error while loading inventory:\n%s",
                           traceback.format_exc())
            self.inventory = {}
            return False

    def load_cache_from_cache(self):
        """ Reads the cache from the cache file and sets self.cache """

        try:
            # BUGFIX: close the handle deterministically (see above).
            with open(self.cache_path_cache, 'r') as cache:
                self.cache = json.loads(cache.read())
            return True
        except Exception:
            self.log.error("Error while loading host cache:\n%s",
                           traceback.format_exc())
            self.cache = {}
            return False

    def write_to_cache(self, data, filename):
        """ Writes data in JSON format to a specified file. """

        json_data = self.json_format_dict(data, self.args.pretty)
        # BUGFIX: 'with' flushes and closes even if write() raises.
        with open(filename, 'w') as cache:
            cache.write(json_data)

    def to_safe(self, word):
        """ Converts 'bad' characters in a string to underscores so they
            can be used as Ansible groups """

        return re.sub(r"[^A-Za-z0-9\-]", "_", word)

    def json_format_dict(self, data, pretty=False):
        """ Converts a dict to a JSON object and dumps it as a formatted string """

        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)


# BUGFIX: the original guard was "__name__ in '__main__'", a substring test
# that only worked by accident; equality is the intended check.
if __name__ == '__main__':
    inventory = CollinsInventory()

    if inventory.run():
        sys.exit(0)
    else:
        sys.exit(-1)
sgiavasis/nipype
refs/heads/master
tools/toollib.py
10
"""Various utilities common to IPython release and maintenance tools. """ from builtins import map # Library imports import os import sys from subprocess import Popen, PIPE, CalledProcessError, check_call from distutils.dir_util import remove_tree # Useful shorthands pjoin = os.path.join cd = os.chdir # Utility functions # ----------------------------------------------------------------------------- # Functions # ----------------------------------------------------------------------------- def sh(cmd): """Execute command in a subshell, return status code.""" return check_call(cmd, shell=True) def compile_tree(): """Compile all Python files below current directory.""" vstr = '.'.join(map(str, sys.version_info[:2])) stat = os.system('python %s/lib/python%s/compileall.py .' % (sys.prefix, vstr)) if stat: msg = '*** ERROR: Some Python files in tree do NOT compile! ***\n' msg += 'See messages above for the actual file that produced it.\n' raise SystemExit(msg)
leilihh/nova
refs/heads/stable/icehouse
nova/tests/compute/test_compute_utils.py
5
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Tests For miscellaneous util methods used with compute."""

import copy
import string

import mock
from oslo.config import cfg

from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova.network import api as network_api
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova import rpc
from nova import test
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests import fake_instance_actions
from nova.tests import fake_network
from nova.tests import fake_notifier
import nova.tests.image.fake
from nova.tests import matchers
from nova.virt import driver

CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')


class ComputeValidateDeviceTestCase(test.TestCase):
    """Tests for compute_utils.get_device_name_for_instance device naming."""

    def setUp(self):
        super(ComputeValidateDeviceTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        # check if test name includes "xen"
        # Xen-style device names (/dev/xvd*) need a different driver and a
        # fake instance without a default root/ephemeral device.
        if 'xen' in self.id():
            self.flags(compute_driver='xenapi.XenAPIDriver')
            self.instance = {
                'uuid': 'fake',
                'root_device_name': None,
                'instance_type_id': 'fake',
            }
        else:
            self.instance = {
                'uuid': 'fake',
                'root_device_name': '/dev/vda',
                'default_ephemeral_device': '/dev/vdb',
                'instance_type_id': 'fake',
            }
        # self.data is the fake BDM list returned by the stubbed db call below.
        self.data = []

        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       lambda context, instance, use_slave=False: self.data)

    def _update_flavor(self, flavor_info):
        # Build a baseline flavor and fold it into the fake instance's
        # system_metadata in the 'instance_type_*' key format nova expects.
        self.flavor = {
            'id': 1,
            'name': 'foo',
            'memory_mb': 128,
            'vcpus': 1,
            'root_gb': 10,
            'ephemeral_gb': 10,
            'flavorid': 1,
            'swap': 0,
            'rxtx_factor': 1.0,
            'vcpu_weight': 1,
        }
        self.flavor.update(flavor_info)
        self.instance['system_metadata'] = [{'key': 'instance_type_%s' % key,
                                             'value': value}
                                            for key, value in self.flavor.items()]

    def _validate_device(self, device=None):
        # Exercise the function under test with the current fake BDM list.
        bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
            self.context, self.instance['uuid'])
        return compute_utils.get_device_name_for_instance(
            self.context, self.instance, bdms, device)

    @staticmethod
    def _fake_bdm(device):
        # Minimal volume-backed BDM occupying the given device name.
        return fake_block_device.FakeDbBlockDeviceDict({
            'source_type': 'volume',
            'destination_type': 'volume',
            'device_name': device,
            'no_device': None,
            'volume_id': 'fake',
            'snapshot_id': None,
            'guest_format': None
        })

    def test_wrap(self):
        # All of vdc..vdz taken -> next name wraps to the two-letter vdaa.
        self.data = []
        for letter in string.ascii_lowercase[2:]:
            self.data.append(self._fake_bdm('/dev/vd' + letter))
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdaa')

    def test_wrap_plus_one(self):
        # vdc..vdz and vdaa taken -> next is vdab.
        self.data = []
        for letter in string.ascii_lowercase[2:]:
            self.data.append(self._fake_bdm('/dev/vd' + letter))
        self.data.append(self._fake_bdm('/dev/vdaa'))
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdab')

    def test_later(self):
        self.data = [
            self._fake_bdm('/dev/vdc'),
            self._fake_bdm('/dev/vdd'),
            self._fake_bdm('/dev/vde'),
        ]
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdf')

    def test_gap(self):
        # A gap (vdd) in the sequence is filled before extending the range.
        self.data = [
            self._fake_bdm('/dev/vdc'),
            self._fake_bdm('/dev/vde'),
        ]
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdd')

    def test_no_bdms(self):
        # vda (root) and vdb (ephemeral) are reserved, so vdc is first free.
        self.data = []
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdc')

    def test_lxc_names_work(self):
        # LXC-style single-letter device names are also handled.
        self.instance['root_device_name'] = '/dev/a'
        self.instance['ephemeral_device_name'] = '/dev/b'
        self.data = []
        device = self._validate_device()
        self.assertEqual(device, '/dev/c')

    def test_name_conversion(self):
        # Requested names in other prefixes (sd, xvd, bare) normalize to vd*.
        self.data = []
        device = self._validate_device('/dev/c')
        self.assertEqual(device, '/dev/vdc')
        device = self._validate_device('/dev/sdc')
        self.assertEqual(device, '/dev/vdc')
        device = self._validate_device('/dev/xvdc')
        self.assertEqual(device, '/dev/vdc')

    def test_invalid_bdms(self):
        self.instance['root_device_name'] = "baddata"
        self.assertRaises(exception.InvalidDevicePath,
                          self._validate_device)

    def test_invalid_device_prefix(self):
        self.assertRaises(exception.InvalidDevicePath,
                          self._validate_device, '/baddata/vdc')

    def test_device_in_use(self):
        # Requesting the root device must fail and name the conflict.
        exc = self.assertRaises(exception.DevicePathInUse,
                                self._validate_device, '/dev/vda')
        self.assertIn('/dev/vda', str(exc))

    def test_swap(self):
        self.instance['default_swap_device'] = "/dev/vdc"
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdd')

    def test_swap_no_ephemeral(self):
        del self.instance['default_ephemeral_device']
        self.instance['default_swap_device'] = "/dev/vdb"
        device = self._validate_device()
        self.assertEqual(device, '/dev/vdc')

    def test_ephemeral_xenapi(self):
        # Xen driver (selected by test name, see setUp): ephemeral takes xvdb,
        # so the next free device is xvdc.
        self._update_flavor({
            'ephemeral_gb': 10,
            'swap': 0,
        })
        self.stubs.Set(flavors, 'get_flavor',
                       lambda instance_type_id, ctxt=None: self.flavor)
        device = self._validate_device()
        self.assertEqual(device, '/dev/xvdc')

    def test_swap_xenapi(self):
        self._update_flavor({
            'ephemeral_gb': 0,
            'swap': 10,
        })
        self.stubs.Set(flavors, 'get_flavor',
                       lambda instance_type_id, ctxt=None: self.flavor)
        device = self._validate_device()
        self.assertEqual(device, '/dev/xvdb')

    def test_swap_and_ephemeral_xenapi(self):
        self._update_flavor({
            'ephemeral_gb': 10,
            'swap': 10,
        })
        self.stubs.Set(flavors, 'get_flavor',
                       lambda instance_type_id, ctxt=None: self.flavor)
        device = self._validate_device()
        self.assertEqual(device, '/dev/xvdd')

    def test_swap_and_one_attachment_xenapi(self):
        self._update_flavor({
            'ephemeral_gb': 0,
            'swap': 10,
        })
        self.stubs.Set(flavors, 'get_flavor',
                       lambda instance_type_id, ctxt=None: self.flavor)
        device = self._validate_device()
        self.assertEqual(device, '/dev/xvdb')
        # After attaching the suggested device, the next suggestion skips
        # past the swap slot.
        self.data.append(self._fake_bdm(device))
        device = self._validate_device()
        self.assertEqual(device, '/dev/xvdd')


class DefaultDeviceNamesForInstanceTestCase(test.NoDBTestCase):
    """Tests for compute_utils.default_device_names_for_instance."""

    def setUp(self):
        super(DefaultDeviceNamesForInstanceTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        # One local-blank ephemeral BDM on /dev/vdb.
        self.ephemerals = block_device_obj.block_device_make_list(
            self.context,
            [fake_block_device.FakeDbBlockDeviceDict(
                {'id': 1, 'instance_uuid': 'fake-instance',
                 'device_name': '/dev/vdb',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'delete_on_termination': True,
                 'guest_format': None,
                 'boot_index': -1})])

        # One swap BDM on /dev/vdc.
        self.swap = block_device_obj.block_device_make_list(
            self.context,
            [fake_block_device.FakeDbBlockDeviceDict(
                {'id': 2, 'instance_uuid': 'fake-instance',
                 'device_name': '/dev/vdc',
                 'source_type': 'blank',
                 'destination_type': 'local',
                 'delete_on_termination': True,
                 'guest_format': 'swap',
                 'boot_index': -1})])

        # Boot volume on /dev/vda plus a snapshot-backed volume on /dev/vdd.
        self.block_device_mapping = block_device_obj.block_device_make_list(
            self.context,
            [fake_block_device.FakeDbBlockDeviceDict(
                {'id': 3, 'instance_uuid': 'fake-instance',
                 'device_name': '/dev/vda',
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'volume_id': 'fake-volume-id-1',
                 'boot_index': 0}),
             fake_block_device.FakeDbBlockDeviceDict(
                {'id': 4, 'instance_uuid': 'fake-instance',
                 'device_name': '/dev/vdd',
                 'source_type': 'snapshot',
                 'destination_type': 'volume',
                 'snapshot_id': 'fake-snapshot-id-1',
                 'boot_index': -1})])
        self.flavor = {'swap': 4}
        self.instance = {'uuid': 'fake_instance', 'ephemeral_gb': 2}
        self.is_libvirt = False
        self.root_device_name = '/dev/vda'
        self.update_called = False

        def fake_extract_flavor(instance):
            return self.flavor

        def fake_driver_matches(driver_string):
            if driver_string == 'libvirt.LibvirtDriver':
                return self.is_libvirt
            return False

        # Patch out DB saves and driver/flavor lookups for the duration of
        # each test; stopped again in tearDown.
        self.patchers = []
        self.patchers.append(
            mock.patch.object(block_device_obj.BlockDeviceMapping, 'save'))
        self.patchers.append(
            mock.patch.object(
                flavors, 'extract_flavor',
                new=mock.Mock(side_effect=fake_extract_flavor)))
        self.patchers.append(
            mock.patch.object(driver,
                              'compute_driver_matches',
                              new=mock.Mock(
                                  side_effect=fake_driver_matches)))
        for patcher in self.patchers:
            patcher.start()

    def tearDown(self):
        super(DefaultDeviceNamesForInstanceTestCase, self).tearDown()
        for patcher in self.patchers:
            patcher.stop()

    def _test_default_device_names(self, *block_device_lists):
        compute_utils.default_device_names_for_instance(self.instance,
                                                        self.root_device_name,
                                                        *block_device_lists)

    def test_only_block_device_mapping(self):
        # Test no-op
        original_bdm = copy.deepcopy(self.block_device_mapping)
        self._test_default_device_names([], [], self.block_device_mapping)
        for original, new in zip(original_bdm, self.block_device_mapping):
            self.assertEqual(original.device_name, new.device_name)

        # Assert it defaults the missing one as expected
        self.block_device_mapping[1]['device_name'] = None
        self._test_default_device_names([], [], self.block_device_mapping)
        self.assertEqual(self.block_device_mapping[1]['device_name'], '/dev/vdb')

    def test_with_ephemerals(self):
        # Test ephemeral gets assigned
        self.ephemerals[0]['device_name'] = None
        self._test_default_device_names(self.ephemerals, [],
                                        self.block_device_mapping)
        self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')

        self.block_device_mapping[1]['device_name'] = None
        self._test_default_device_names(self.ephemerals, [],
                                        self.block_device_mapping)
        self.assertEqual(self.block_device_mapping[1]['device_name'],
                         '/dev/vdc')

    def test_with_swap(self):
        # Test swap only
        self.swap[0]['device_name'] = None
        self._test_default_device_names([], self.swap, [])
        self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')

        # Test swap and block_device_mapping
        self.swap[0]['device_name'] = None
        self.block_device_mapping[1]['device_name'] = None
        self._test_default_device_names([], self.swap,
                                        self.block_device_mapping)
        self.assertEqual(self.swap[0]['device_name'], '/dev/vdb')
        self.assertEqual(self.block_device_mapping[1]['device_name'],
                         '/dev/vdc')

    def test_all_together(self):
        # Test swap missing
        self.swap[0]['device_name'] = None
        self._test_default_device_names(self.ephemerals, self.swap,
                                        self.block_device_mapping)
        self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')

        # Test swap and eph missing
        self.swap[0]['device_name'] = None
        self.ephemerals[0]['device_name'] = None
        self._test_default_device_names(self.ephemerals, self.swap,
                                        self.block_device_mapping)
        self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
        self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')

        # Test all missing
        self.swap[0]['device_name'] = None
        self.ephemerals[0]['device_name'] = None
        self.block_device_mapping[1]['device_name'] = None
        self._test_default_device_names(self.ephemerals, self.swap,
                                        self.block_device_mapping)
        self.assertEqual(self.ephemerals[0]['device_name'], '/dev/vdb')
        self.assertEqual(self.swap[0]['device_name'], '/dev/vdc')
        self.assertEqual(self.block_device_mapping[1]['device_name'],
                         '/dev/vdd')


class UsageInfoTestCase(test.TestCase):

    def setUp(self):
        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)

        super(UsageInfoTestCase, self).setUp()
        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                       fake_get_nw_info)

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        self.flags(use_local=True, group='conductor')
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   network_manager='nova.network.manager.FlatManager')
        self.compute = 
importutils.import_object(CONF.compute_manager) self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) def fake_show(meh, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} self.stubs.Set(nova.tests.image.fake._FakeImageService, 'show', fake_show) fake_network.set_stub_network_methods(self.stubs) fake_instance_actions.stub_out_action_events(self.stubs) def _create_instance(self, params={}): """Create a test instance.""" flavor = flavors.get_flavor_by_name('m1.tiny') sys_meta = flavors.save_flavor_info({}, flavor) inst = {} inst['image_ref'] = 1 inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user_id inst['project_id'] = self.project_id inst['instance_type_id'] = flavor['id'] inst['system_metadata'] = sys_meta inst['ami_launch_index'] = 0 inst['root_gb'] = 0 inst['ephemeral_gb'] = 0 inst['info_cache'] = {'network_info': '[]'} inst.update(params) return db.instance_create(self.context, inst)['id'] def test_notify_usage_exists(self): # Ensure 'exists' notification generates appropriate usage data. 
instance_id = self._create_instance() instance = instance_obj.Instance.get_by_id(self.context, instance_id) # Set some system metadata sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2', 'other_data': 'meow'} instance.system_metadata.update(sys_metadata) instance.save() compute_utils.notify_usage_exists( rpc.get_notifier('compute'), self.context, instance) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.exists') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'bandwidth', 'audit_period_beginning', 'audit_period_ending', 'image_meta'): self.assertTrue(attr in payload, msg="Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {'md_key1': 'val1', 'md_key2': 'val2'}) image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) self.compute.terminate_instance(self.context, instance, [], []) def test_notify_usage_exists_deleted_instance(self): # Ensure 'exists' notification generates appropriate usage data. 
instance_id = self._create_instance() instance = instance_obj.Instance.get_by_id(self.context, instance_id, expected_attrs=['metadata', 'system_metadata', 'info_cache']) # Set some system metadata sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2', 'other_data': 'meow'} instance.system_metadata.update(sys_metadata) instance.save() self.compute.terminate_instance(self.context, instance, [], []) instance = instance_obj.Instance.get_by_id( self.context.elevated(read_deleted='yes'), instance_id, expected_attrs=['system_metadata']) compute_utils.notify_usage_exists( rpc.get_notifier('compute'), self.context, instance) msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.exists') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'bandwidth', 'audit_period_beginning', 'audit_period_ending', 'image_meta'): self.assertTrue(attr in payload, msg="Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {'md_key1': 'val1', 'md_key2': 'val2'}) image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) def test_notify_usage_exists_instance_not_found(self): # Ensure 'exists' notification generates appropriate usage data. 
instance_id = self._create_instance() instance = instance_obj.Instance.get_by_id(self.context, instance_id, expected_attrs=['metadata', 'system_metadata', 'info_cache']) self.compute.terminate_instance(self.context, instance, [], []) compute_utils.notify_usage_exists( rpc.get_notifier('compute'), self.context, instance) msg = fake_notifier.NOTIFICATIONS[-1] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.exists') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'bandwidth', 'audit_period_beginning', 'audit_period_ending', 'image_meta'): self.assertTrue(attr in payload, msg="Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {}) image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) def test_notify_about_instance_usage(self): instance_id = self._create_instance() instance = instance_obj.Instance.get_by_id(self.context, instance_id, expected_attrs=['metadata', 'system_metadata', 'info_cache']) # Set some system metadata sys_metadata = {'image_md_key1': 'val1', 'image_md_key2': 'val2', 'other_data': 'meow'} instance.system_metadata.update(sys_metadata) instance.save() extra_usage_info = {'image_name': 'fake_name'} compute_utils.notify_about_instance_usage( rpc.get_notifier('compute'), self.context, instance, 'create.start', extra_usage_info=extra_usage_info) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = 
fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'compute.instance.create.start') payload = msg.payload self.assertEqual(payload['tenant_id'], self.project_id) self.assertEqual(payload['user_id'], self.user_id) self.assertEqual(payload['instance_id'], instance['uuid']) self.assertEqual(payload['instance_type'], 'm1.tiny') type_id = flavors.get_flavor_by_name('m1.tiny')['id'] self.assertEqual(str(payload['instance_type_id']), str(type_id)) flavor_id = flavors.get_flavor_by_name('m1.tiny')['flavorid'] self.assertEqual(str(payload['instance_flavor_id']), str(flavor_id)) for attr in ('display_name', 'created_at', 'launched_at', 'state', 'state_description', 'image_meta'): self.assertTrue(attr in payload, msg="Key %s not in payload" % attr) self.assertEqual(payload['image_meta'], {'md_key1': 'val1', 'md_key2': 'val2'}) self.assertEqual(payload['image_name'], 'fake_name') image_ref_url = "%s/images/1" % glance.generate_glance_url() self.assertEqual(payload['image_ref_url'], image_ref_url) self.compute.terminate_instance(self.context, instance, [], []) def test_notify_about_aggregate_update_with_id(self): # Set aggregate payload aggregate_payload = {'aggregate_id': 1} compute_utils.notify_about_aggregate_update(self.context, "create.end", aggregate_payload) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'aggregate.create.end') payload = msg.payload self.assertEqual(payload['aggregate_id'], 1) def test_notify_about_aggregate_update_with_name(self): # Set aggregate payload aggregate_payload = {'name': 'fakegroup'} compute_utils.notify_about_aggregate_update(self.context, "create.start", aggregate_payload) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1) msg = fake_notifier.NOTIFICATIONS[0] self.assertEqual(msg.priority, 'INFO') self.assertEqual(msg.event_type, 'aggregate.create.start') payload = 
msg.payload self.assertEqual(payload['name'], 'fakegroup') def test_notify_about_aggregate_update_without_name_id(self): # Set empty aggregate payload aggregate_payload = {} compute_utils.notify_about_aggregate_update(self.context, "create.start", aggregate_payload) self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0) class ComputeGetImageMetadataTestCase(test.TestCase): def setUp(self): super(ComputeGetImageMetadataTestCase, self).setUp() self.context = context.RequestContext('fake', 'fake') self.image = { "min_ram": 10, "min_disk": 1, "disk_format": "raw", "container_format": "bare", "properties": {}, } self.image_service = nova.tests.image.fake._FakeImageService() self.stubs.Set(self.image_service, 'show', self._fake_show) self.ctx = context.RequestContext('fake', 'fake') sys_meta = { 'image_min_ram': 10, 'image_min_disk': 1, 'image_disk_format': 'raw', 'image_container_format': 'bare', 'instance_type_id': 0, 'instance_type_name': 'm1.fake', 'instance_type_memory_mb': 10, 'instance_type_vcpus': 1, 'instance_type_root_gb': 1, 'instance_type_ephemeral_gb': 1, 'instance_type_flavorid': '0', 'instance_type_swap': 1, 'instance_type_rxtx_factor': 0.0, 'instance_type_vcpu_weight': None, } self.instance = fake_instance.fake_db_instance( memory_mb=0, root_gb=0, system_metadata=sys_meta) @property def instance_obj(self): return instance_obj.Instance._from_db_object( self.ctx, instance_obj.Instance(), self.instance, expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS) def _fake_show(self, ctx, image_id): return self.image def test_get_image_meta(self): image_meta = compute_utils.get_image_metadata( self.ctx, self.image_service, 'fake-image', self.instance_obj) self.image['properties'] = 'DONTCARE' self.assertThat(self.image, matchers.DictMatches(image_meta)) def test_get_image_meta_no_image(self): def fake_show(ctx, image_id): raise exception.ImageNotFound(image_id='fake-image') self.stubs.Set(self.image_service, 'show', fake_show) image_meta = 
compute_utils.get_image_metadata( self.ctx, self.image_service, 'fake-image', self.instance_obj) self.image['properties'] = 'DONTCARE' # NOTE(danms): The trip through system_metadata will stringify things for key in self.image: self.image[key] = str(self.image[key]) self.assertThat(self.image, matchers.DictMatches(image_meta)) def test_get_image_meta_no_image_system_meta(self): for k in self.instance['system_metadata'].keys(): if k.startswith('image_'): del self.instance['system_metadata'][k] image_meta = compute_utils.get_image_metadata( self.ctx, self.image_service, 'fake-image', self.instance_obj) self.image['properties'] = 'DONTCARE' self.assertThat(self.image, matchers.DictMatches(image_meta)) def test_get_image_meta_no_image_no_image_system_meta(self): def fake_show(ctx, image_id): raise exception.ImageNotFound(image_id='fake-image') self.stubs.Set(self.image_service, 'show', fake_show) for k in self.instance['system_metadata'].keys(): if k.startswith('image_'): del self.instance['system_metadata'][k] image_meta = compute_utils.get_image_metadata( self.ctx, self.image_service, 'fake-image', self.instance_obj) expected = {'properties': 'DONTCARE'} self.assertThat(expected, matchers.DictMatches(image_meta)) class ComputeUtilsGetNWInfo(test.TestCase): def test_instance_object_none_info_cache(self): inst = fake_instance.fake_instance_obj('fake-context', expected_attrs=['info_cache']) self.assertIsNone(inst.info_cache) result = compute_utils.get_nw_info_for_instance(inst) self.assertEqual(jsonutils.dumps([]), result.json()) def test_instance_dict_none_info_cache(self): inst = fake_instance.fake_db_instance(info_cache=None) self.assertIsNone(inst['info_cache']) result = compute_utils.get_nw_info_for_instance(inst) self.assertEqual(jsonutils.dumps([]), result.json()) class ComputeUtilsGetRebootTypes(test.TestCase): def setUp(self): super(ComputeUtilsGetRebootTypes, self).setUp() self.context = context.RequestContext('fake', 'fake') def 
test_get_reboot_type_started_soft(self): reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_STARTED, power_state.RUNNING) self.assertEqual(reboot_type, 'SOFT') def test_get_reboot_type_pending_soft(self): reboot_type = compute_utils.get_reboot_type(task_states.REBOOT_PENDING, power_state.RUNNING) self.assertEqual(reboot_type, 'SOFT') def test_get_reboot_type_hard(self): reboot_type = compute_utils.get_reboot_type('foo', power_state.RUNNING) self.assertEqual(reboot_type, 'HARD') def test_get_reboot_not_running_hard(self): reboot_type = compute_utils.get_reboot_type('foo', 'bar') self.assertEqual(reboot_type, 'HARD')
ivan-fedorov/intellij-community
refs/heads/master
python/lib/Lib/site-packages/django/contrib/gis/tests/relatedapp/tests.py
123
from django.test import TestCase from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint from django.contrib.gis.db.models import Collect, Count, Extent, F, Union from django.contrib.gis.geometry.backend import Geometry from django.contrib.gis.tests.utils import mysql, oracle, no_mysql, no_oracle, no_spatialite from models import City, Location, DirectoryEntry, Parcel, Book, Author, Article class RelatedGeoModelTest(TestCase): def test02_select_related(self): "Testing `select_related` on geographic models (see #7126)." qs1 = City.objects.all() qs2 = City.objects.select_related() qs3 = City.objects.select_related('location') # Reference data for what's in the fixtures. cities = ( ('Aurora', 'TX', -97.516111, 33.058333), ('Roswell', 'NM', -104.528056, 33.387222), ('Kecksburg', 'PA', -79.460734, 40.18476), ) for qs in (qs1, qs2, qs3): for ref, c in zip(cities, qs): nm, st, lon, lat = ref self.assertEqual(nm, c.name) self.assertEqual(st, c.state) self.assertEqual(Point(lon, lat), c.location.point) @no_mysql def test03_transform_related(self): "Testing the `transform` GeoQuerySet method on related geographic models." # All the transformations are to state plane coordinate systems using # US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot). tol = 0 def check_pnt(ref, pnt): self.assertAlmostEqual(ref.x, pnt.x, tol) self.assertAlmostEqual(ref.y, pnt.y, tol) self.assertEqual(ref.srid, pnt.srid) # Each city transformed to the SRID of their state plane coordinate system. transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'), ('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'), ('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'), ) for name, srid, wkt in transformed: # Doing this implicitly sets `select_related` select the location. # TODO: Fix why this breaks on Oracle. 
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point')) check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point) @no_mysql @no_spatialite def test04a_related_extent_aggregate(self): "Testing the `extent` GeoQuerySet aggregates on related geographic models." # This combines the Extent and Union aggregates into one query aggs = City.objects.aggregate(Extent('location__point')) # One for all locations, one that excludes New Mexico (Roswell). all_extent = (-104.528056, 29.763374, -79.460734, 40.18476) txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476) e1 = City.objects.extent(field_name='location__point') e2 = City.objects.exclude(state='NM').extent(field_name='location__point') e3 = aggs['location__point__extent'] # The tolerance value is to four decimal places because of differences # between the Oracle and PostGIS spatial backends on the extent calculation. tol = 4 for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]: for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol) @no_mysql def test04b_related_union_aggregate(self): "Testing the `unionagg` GeoQuerySet aggregates on related geographic models." # This combines the Extent and Union aggregates into one query aggs = City.objects.aggregate(Union('location__point')) # These are the points that are components of the aggregate geographic # union that is returned. Each point # corresponds to City PK. p1 = Point(-104.528056, 33.387222) p2 = Point(-97.516111, 33.058333) p3 = Point(-79.460734, 40.18476) p4 = Point(-96.801611, 32.782057) p5 = Point(-95.363151, 29.763374) # Creating the reference union geometry depending on the spatial backend, # as Oracle will have a different internal ordering of the component # geometries than PostGIS. The second union aggregate is for a union # query that includes limiting information in the WHERE clause (in other # words a `.filter()` precedes the call to `.unionagg()`). 
if oracle: ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326) ref_u2 = MultiPoint(p3, p2, srid=4326) else: # Looks like PostGIS points by longitude value. ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326) ref_u2 = MultiPoint(p2, p3, srid=4326) u1 = City.objects.unionagg(field_name='location__point') u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point') u3 = aggs['location__point__union'] self.assertEqual(ref_u1, u1) self.assertEqual(ref_u2, u2) self.assertEqual(ref_u1, u3) def test05_select_related_fk_to_subclass(self): "Testing that calling select_related on a query over a model with an FK to a model subclass works" # Regression test for #9752. l = list(DirectoryEntry.objects.all().select_related()) def test06_f_expressions(self): "Testing F() expressions on GeometryFields." # Constructing a dummy parcel border and getting the City instance for # assigning the FK. b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326) pcity = City.objects.get(name='Aurora') # First parcel has incorrect center point that is equal to the City; # it also has a second border that is different from the first as a # 100ft buffer around the City. c1 = pcity.location.point c2 = c1.transform(2276, clone=True) b2 = c2.buffer(100) p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2) # Now creating a second Parcel where the borders are the same, just # in different coordinate systems. The center points are also the # the same (but in different coordinate systems), and this time they # actually correspond to the centroid of the border. c1 = b1.centroid c2 = c1.transform(2276, clone=True) p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1) # Should return the second Parcel, which has the center within the # border. 
qs = Parcel.objects.filter(center1__within=F('border1')) self.assertEqual(1, len(qs)) self.assertEqual('P2', qs[0].name) if not mysql: # This time center2 is in a different coordinate system and needs # to be wrapped in transformation SQL. qs = Parcel.objects.filter(center2__within=F('border1')) self.assertEqual(1, len(qs)) self.assertEqual('P2', qs[0].name) # Should return the first Parcel, which has the center point equal # to the point in the City ForeignKey. qs = Parcel.objects.filter(center1=F('city__location__point')) self.assertEqual(1, len(qs)) self.assertEqual('P1', qs[0].name) if not mysql: # This time the city column should be wrapped in transformation SQL. qs = Parcel.objects.filter(border2__contains=F('city__location__point')) self.assertEqual(1, len(qs)) self.assertEqual('P1', qs[0].name) def test07_values(self): "Testing values() and values_list() and GeoQuerySets." # GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively. gqs = Location.objects.all() gvqs = Location.objects.values() gvlqs = Location.objects.values_list() # Incrementing through each of the models, dictionaries, and tuples # returned by the different types of GeoQuerySets. for m, d, t in zip(gqs, gvqs, gvlqs): # The values should be Geometry objects and not raw strings returned # by the spatial database. self.failUnless(isinstance(d['point'], Geometry)) self.failUnless(isinstance(t[1], Geometry)) self.assertEqual(m.point, d['point']) self.assertEqual(m.point, t[1]) def test08_defer_only(self): "Testing defer() and only() on Geographic models." qs = Location.objects.all() def_qs = Location.objects.defer('point') for loc, def_loc in zip(qs, def_qs): self.assertEqual(loc.point, def_loc.point) def test09_pk_relations(self): "Ensuring correct primary key column is selected across relations. See #10757." # The expected ID values -- notice the last two location IDs # are out of order. 
Dallas and Houston have location IDs that differ # from their PKs -- this is done to ensure that the related location # ID column is selected instead of ID column for the city. city_ids = (1, 2, 3, 4, 5) loc_ids = (1, 2, 3, 5, 4) ids_qs = City.objects.order_by('id').values('id', 'location__id') for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids): self.assertEqual(val_dict['id'], c_id) self.assertEqual(val_dict['location__id'], l_id) def test10_combine(self): "Testing the combination of two GeoQuerySets. See #10807." buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1) buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1) qs1 = City.objects.filter(location__point__within=buf1) qs2 = City.objects.filter(location__point__within=buf2) combined = qs1 | qs2 names = [c.name for c in combined] self.assertEqual(2, len(names)) self.failUnless('Aurora' in names) self.failUnless('Kecksburg' in names) def test11_geoquery_pickle(self): "Ensuring GeoQuery objects are unpickled correctly. See #10839." import pickle from django.contrib.gis.db.models.sql import GeoQuery qs = City.objects.all() q_str = pickle.dumps(qs.query) q = pickle.loads(q_str) self.assertEqual(GeoQuery, q.__class__) # TODO: fix on Oracle -- get the following error because the SQL is ordered # by a geometry object, which Oracle apparently doesn't like: # ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type @no_oracle def test12a_count(self): "Testing `Count` aggregate use with the `GeoManager` on geo-fields." # The City, 'Fort Worth' uses the same location as Dallas. dallas = City.objects.get(name='Dallas') # Count annotation should be 2 for the Dallas location now. loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id) self.assertEqual(2, loc.num_cities) def test12b_count(self): "Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087." 
# Should only be one author (Trevor Paglen) returned by this query, and # the annotation should have 3 for the number of books, see #11087. # Also testing with a `GeoValuesQuerySet`, see #11489. qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1) vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1) self.assertEqual(1, len(qs)) self.assertEqual(3, qs[0].num_books) self.assertEqual(1, len(vqs)) self.assertEqual(3, vqs[0]['num_books']) # TODO: The phantom model does appear on Oracle. @no_oracle def test13_select_related_null_fk(self): "Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381." no_author = Book.objects.create(title='Without Author') b = Book.objects.select_related('author').get(title='Without Author') # Should be `None`, and not a 'dummy' model. self.assertEqual(None, b.author) @no_mysql @no_oracle @no_spatialite def test14_collect(self): "Testing the `collect` GeoQuerySet method and `Collect` aggregate." # Reference query: # SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN # "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id") # WHERE "relatedapp_city"."state" = 'TX'; ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)') c1 = City.objects.filter(state='TX').collect(field_name='location__point') c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect'] for coll in (c1, c2): # Even though Dallas and Ft. Worth share same point, Collect doesn't # consolidate -- that's why 4 points in MultiPoint. self.assertEqual(4, len(coll)) self.assertEqual(ref_geom, coll) def test15_invalid_select_related(self): "Testing doing select_related on the related name manager of a unique FK. See #13934." 
qs = Article.objects.select_related('author__article') # This triggers TypeError when `get_default_columns` has no `local_only` # keyword. The TypeError is swallowed if QuerySet is actually # evaluated as list generation swallows TypeError in CPython. sql = str(qs.query) # TODO: Related tests for KML, GML, and distance lookups.
linuxmidhun/0install
refs/heads/master
zeroinstall/gtkui/help_box.py
8
"""A dialog box for displaying help text.""" # Copyright (C) 2009, Thomas Leonard # See the README file for details, or visit http://0install.net. import gtk import sys class HelpBox(object): """A dialog for showing longish help texts. The GTK widget is not created until L{display} is called. """ box = None title = None sections = None def __init__(self, title, *sections): """Constructor. @param title: window title @param sections: the content, as a list of (section_title, section_body) pairs @type sections: [(str, str)]""" self.title = title self.sections = sections def display(self): """Display this help text. If it is already displayed, close the old window first.""" if self.box: self.box.destroy() assert not self.box self.box = box = gtk.Dialog() if sys.version_info[0] < 3: self.box.set_has_separator(False) self.box.set_position(gtk.WIN_POS_CENTER) box.set_title(self.title) swin = gtk.ScrolledWindow(None, None) swin.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_ALWAYS) swin.set_shadow_type(gtk.SHADOW_IN) swin.set_border_width(2) box.get_content_area().pack_start(swin, True, True, 0) text = gtk.TextView() text.set_left_margin(4) text.set_right_margin(4) text.set_wrap_mode(gtk.WRAP_WORD) text.set_editable(False) text.set_cursor_visible(False) model = text.get_buffer() titer = model.get_start_iter() heading_style = model.create_tag(underline = True, scale = 1.2) first = True for title, body in self.sections: if first: first = False else: model.insert(titer, '\n\n') model.insert_with_tags(titer, title, heading_style) model.insert(titer, '\n' + body.strip()) swin.add(text) swin.show_all() box.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CANCEL) box.connect('response', lambda box, resp: box.destroy()) box.set_default_response(gtk.RESPONSE_CANCEL) def destroyed(box): self.box = None box.connect('destroy', destroyed) box.set_position(gtk.WIN_POS_CENTER) box.set_default_size(gtk.gdk.screen_width() / 4, gtk.gdk.screen_height() / 3) box.show()
clovett/MissionPlanner
refs/heads/master
Lib/distutils/filelist.py
50
"""distutils.filelist Provides the FileList class, used for poking about the filesystem and building lists of files. """ __revision__ = "$Id$" import os, re import fnmatch from distutils.util import convert_path from distutils.errors import DistutilsTemplateError, DistutilsInternalError from distutils import log class FileList: """A list of files built by on exploring the filesystem and filtered by applying various patterns to what we find there. Instance attributes: dir directory from which files will be taken -- only used if 'allfiles' not supplied to constructor files list of filenames currently being built/filtered/manipulated allfiles complete list of files under consideration (ie. without any filtering applied) """ def __init__(self, warn=None, debug_print=None): # ignore argument to FileList, but keep them for backwards # compatibility self.allfiles = None self.files = [] def set_allfiles(self, allfiles): self.allfiles = allfiles def findall(self, dir=os.curdir): self.allfiles = findall(dir) def debug_print(self, msg): """Print 'msg' to stdout if the global DEBUG (taken from the DISTUTILS_DEBUG environment variable) flag is true. """ from distutils.debug import DEBUG if DEBUG: print msg # -- List-like methods --------------------------------------------- def append(self, item): self.files.append(item) def extend(self, items): self.files.extend(items) def sort(self): # Not a strict lexical sort! sortable_files = map(os.path.split, self.files) sortable_files.sort() self.files = [] for sort_tuple in sortable_files: self.files.append(os.path.join(*sort_tuple)) # -- Other miscellaneous utility methods --------------------------- def remove_duplicates(self): # Assumes list has been sorted! 
for i in range(len(self.files) - 1, 0, -1): if self.files[i] == self.files[i - 1]: del self.files[i] # -- "File template" methods --------------------------------------- def _parse_template_line(self, line): words = line.split() action = words[0] patterns = dir = dir_pattern = None if action in ('include', 'exclude', 'global-include', 'global-exclude'): if len(words) < 2: raise DistutilsTemplateError, \ "'%s' expects <pattern1> <pattern2> ..." % action patterns = map(convert_path, words[1:]) elif action in ('recursive-include', 'recursive-exclude'): if len(words) < 3: raise DistutilsTemplateError, \ "'%s' expects <dir> <pattern1> <pattern2> ..." % action dir = convert_path(words[1]) patterns = map(convert_path, words[2:]) elif action in ('graft', 'prune'): if len(words) != 2: raise DistutilsTemplateError, \ "'%s' expects a single <dir_pattern>" % action dir_pattern = convert_path(words[1]) else: raise DistutilsTemplateError, "unknown action '%s'" % action return (action, patterns, dir, dir_pattern) def process_template_line(self, line): # Parse the line: split it up, make sure the right number of words # is there, and return the relevant words. 'action' is always # defined: it's the first word of the line. Which of the other # three are defined depends on the action; it'll be either # patterns, (dir and patterns), or (dir_pattern). action, patterns, dir, dir_pattern = self._parse_template_line(line) # OK, now we know that the action is valid and we have the # right number of words on the line for that action -- so we # can proceed with minimal error-checking. 
if action == 'include': self.debug_print("include " + ' '.join(patterns)) for pattern in patterns: if not self.include_pattern(pattern, anchor=1): log.warn("warning: no files found matching '%s'", pattern) elif action == 'exclude': self.debug_print("exclude " + ' '.join(patterns)) for pattern in patterns: if not self.exclude_pattern(pattern, anchor=1): log.warn(("warning: no previously-included files " "found matching '%s'"), pattern) elif action == 'global-include': self.debug_print("global-include " + ' '.join(patterns)) for pattern in patterns: if not self.include_pattern(pattern, anchor=0): log.warn(("warning: no files found matching '%s' " + "anywhere in distribution"), pattern) elif action == 'global-exclude': self.debug_print("global-exclude " + ' '.join(patterns)) for pattern in patterns: if not self.exclude_pattern(pattern, anchor=0): log.warn(("warning: no previously-included files matching " "'%s' found anywhere in distribution"), pattern) elif action == 'recursive-include': self.debug_print("recursive-include %s %s" % (dir, ' '.join(patterns))) for pattern in patterns: if not self.include_pattern(pattern, prefix=dir): log.warn(("warning: no files found matching '%s' " + "under directory '%s'"), pattern, dir) elif action == 'recursive-exclude': self.debug_print("recursive-exclude %s %s" % (dir, ' '.join(patterns))) for pattern in patterns: if not self.exclude_pattern(pattern, prefix=dir): log.warn(("warning: no previously-included files matching " "'%s' found under directory '%s'"), pattern, dir) elif action == 'graft': self.debug_print("graft " + dir_pattern) if not self.include_pattern(None, prefix=dir_pattern): log.warn("warning: no directories found matching '%s'", dir_pattern) elif action == 'prune': self.debug_print("prune " + dir_pattern) if not self.exclude_pattern(None, prefix=dir_pattern): log.warn(("no previously-included directories found " + "matching '%s'"), dir_pattern) else: raise DistutilsInternalError, \ "this cannot happen: invalid 
action '%s'" % action # -- Filtering/selection methods ----------------------------------- def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0): """Select strings (presumably filenames) from 'self.files' that match 'pattern', a Unix-style wildcard (glob) pattern. Patterns are not quite the same as implemented by the 'fnmatch' module: '*' and '?' match non-special characters, where "special" is platform-dependent: slash on Unix; colon, slash, and backslash on DOS/Windows; and colon on Mac OS. If 'anchor' is true (the default), then the pattern match is more stringent: "*.py" will match "foo.py" but not "foo/bar.py". If 'anchor' is false, both of these will match. If 'prefix' is supplied, then only filenames starting with 'prefix' (itself a pattern) and ending with 'pattern', with anything in between them, will match. 'anchor' is ignored in this case. If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and 'pattern' is assumed to be either a string containing a regex or a regex object -- no translation is done, the regex is just compiled and used as-is. Selected strings will be added to self.files. Return 1 if files are found. """ files_found = 0 pattern_re = translate_pattern(pattern, anchor, prefix, is_regex) self.debug_print("include_pattern: applying regex r'%s'" % pattern_re.pattern) # delayed loading of allfiles list if self.allfiles is None: self.findall() for name in self.allfiles: if pattern_re.search(name): self.debug_print(" adding " + name) self.files.append(name) files_found = 1 return files_found def exclude_pattern(self, pattern, anchor=1, prefix=None, is_regex=0): """Remove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return 1 if files are found. 
""" files_found = 0 pattern_re = translate_pattern(pattern, anchor, prefix, is_regex) self.debug_print("exclude_pattern: applying regex r'%s'" % pattern_re.pattern) for i in range(len(self.files)-1, -1, -1): if pattern_re.search(self.files[i]): self.debug_print(" removing " + self.files[i]) del self.files[i] files_found = 1 return files_found # ---------------------------------------------------------------------- # Utility functions def findall(dir = os.curdir): """Find all files under 'dir' and return the list of full filenames (relative to 'dir'). """ from stat import ST_MODE, S_ISREG, S_ISDIR, S_ISLNK list = [] stack = [dir] pop = stack.pop push = stack.append while stack: dir = pop() names = os.listdir(dir) for name in names: if dir != os.curdir: # avoid the dreaded "./" syndrome fullname = os.path.join(dir, name) else: fullname = name # Avoid excess stat calls -- just one will do, thank you! stat = os.stat(fullname) mode = stat[ST_MODE] if S_ISREG(mode): list.append(fullname) elif S_ISDIR(mode) and not S_ISLNK(mode): push(fullname) return list def glob_to_re(pattern): """Translate a shell-like glob pattern to a regular expression. Return a string containing the regex. Differs from 'fnmatch.translate()' in that '*' does not match "special characters" (which are platform-specific). """ pattern_re = fnmatch.translate(pattern) # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, # and by extension they shouldn't match such "special characters" under # any OS. So change all non-escaped dots in the RE to match any # character except the special characters. # XXX currently the "special characters" are just slash -- i.e. this is # Unix-only. pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', r'\1[^/]', pattern_re) return pattern_re def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0): """Translate a shell-like wildcard pattern to a compiled regular expression. 
Return the compiled regex. If 'is_regex' true, then 'pattern' is directly compiled to a regex (if it's a string) or just returned as-is (assumes it's a regex object). """ if is_regex: if isinstance(pattern, str): return re.compile(pattern) else: return pattern if pattern: pattern_re = glob_to_re(pattern) else: pattern_re = '' if prefix is not None: # ditch end of pattern character empty_pattern = glob_to_re('') prefix_re = glob_to_re(prefix)[:-len(empty_pattern)] pattern_re = "^" + os.path.join(prefix_re, ".*" + pattern_re) else: # no prefix -- respect anchor flag if anchor: pattern_re = "^" + pattern_re return re.compile(pattern_re)
maliciamrg/xbmc-addon-tvtumbler
refs/heads/master
resources/lib/dns/tsig.py
18
# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. """DNS TSIG support.""" import hmac import struct import sys import dns.exception import dns.hash import dns.rdataclass import dns.name class BadTime(dns.exception.DNSException): """Raised if the current time is not within the TSIG's validity time.""" pass class BadSignature(dns.exception.DNSException): """Raised if the TSIG signature fails to verify.""" pass class PeerError(dns.exception.DNSException): """Base class for all TSIG errors generated by the remote peer""" pass class PeerBadKey(PeerError): """Raised if the peer didn't know the key we used""" pass class PeerBadSignature(PeerError): """Raised if the peer didn't like the signature we sent""" pass class PeerBadTime(PeerError): """Raised if the peer didn't like the time we sent""" pass class PeerBadTruncation(PeerError): """Raised if the peer didn't like amount of truncation in the TSIG we sent""" pass # TSIG Algorithms HMAC_MD5 = dns.name.from_text("HMAC-MD5.SIG-ALG.REG.INT") HMAC_SHA1 = dns.name.from_text("hmac-sha1") HMAC_SHA224 = dns.name.from_text("hmac-sha224") HMAC_SHA256 = dns.name.from_text("hmac-sha256") HMAC_SHA384 = dns.name.from_text("hmac-sha384") HMAC_SHA512 = dns.name.from_text("hmac-sha512") default_algorithm = HMAC_MD5 
BADSIG = 16 BADKEY = 17 BADTIME = 18 BADTRUNC = 22 def sign(wire, keyname, secret, time, fudge, original_id, error, other_data, request_mac, ctx=None, multi=False, first=True, algorithm=default_algorithm): """Return a (tsig_rdata, mac, ctx) tuple containing the HMAC TSIG rdata for the input parameters, the HMAC MAC calculated by applying the TSIG signature algorithm, and the TSIG digest context. @rtype: (string, string, hmac.HMAC object) @raises ValueError: I{other_data} is too long @raises NotImplementedError: I{algorithm} is not supported """ (algorithm_name, digestmod) = get_algorithm(algorithm) if first: ctx = hmac.new(secret, digestmod=digestmod) ml = len(request_mac) if ml > 0: ctx.update(struct.pack('!H', ml)) ctx.update(request_mac) id = struct.pack('!H', original_id) ctx.update(id) ctx.update(wire[2:]) if first: ctx.update(keyname.to_digestable()) ctx.update(struct.pack('!H', dns.rdataclass.ANY)) ctx.update(struct.pack('!I', 0)) long_time = time + 0L upper_time = (long_time >> 32) & 0xffffL lower_time = long_time & 0xffffffffL time_mac = struct.pack('!HIH', upper_time, lower_time, fudge) pre_mac = algorithm_name + time_mac ol = len(other_data) if ol > 65535: raise ValueError('TSIG Other Data is > 65535 bytes') post_mac = struct.pack('!HH', error, ol) + other_data if first: ctx.update(pre_mac) ctx.update(post_mac) else: ctx.update(time_mac) mac = ctx.digest() mpack = struct.pack('!H', len(mac)) tsig_rdata = pre_mac + mpack + mac + id + post_mac if multi: ctx = hmac.new(secret, digestmod=digestmod) ml = len(mac) ctx.update(struct.pack('!H', ml)) ctx.update(mac) else: ctx = None return (tsig_rdata, mac, ctx) def hmac_md5(wire, keyname, secret, time, fudge, original_id, error, other_data, request_mac, ctx=None, multi=False, first=True, algorithm=default_algorithm): return sign(wire, keyname, secret, time, fudge, original_id, error, other_data, request_mac, ctx, multi, first, algorithm) def validate(wire, keyname, secret, now, request_mac, tsig_start, 
tsig_rdata, tsig_rdlen, ctx=None, multi=False, first=True): """Validate the specified TSIG rdata against the other input parameters. @raises FormError: The TSIG is badly formed. @raises BadTime: There is too much time skew between the client and the server. @raises BadSignature: The TSIG signature did not validate @rtype: hmac.HMAC object""" (adcount,) = struct.unpack("!H", wire[10:12]) if adcount == 0: raise dns.exception.FormError adcount -= 1 new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start] current = tsig_rdata (aname, used) = dns.name.from_wire(wire, current) current = current + used (upper_time, lower_time, fudge, mac_size) = \ struct.unpack("!HIHH", wire[current:current + 10]) time = ((upper_time + 0L) << 32) + (lower_time + 0L) current += 10 mac = wire[current:current + mac_size] current += mac_size (original_id, error, other_size) = \ struct.unpack("!HHH", wire[current:current + 6]) current += 6 other_data = wire[current:current + other_size] current += other_size if current != tsig_rdata + tsig_rdlen: raise dns.exception.FormError if error != 0: if error == BADSIG: raise PeerBadSignature elif error == BADKEY: raise PeerBadKey elif error == BADTIME: raise PeerBadTime elif error == BADTRUNC: raise PeerBadTruncation else: raise PeerError('unknown TSIG error code %d' % error) time_low = time - fudge time_high = time + fudge if now < time_low or now > time_high: raise BadTime (junk, our_mac, ctx) = sign(new_wire, keyname, secret, time, fudge, original_id, error, other_data, request_mac, ctx, multi, first, aname) if (our_mac != mac): raise BadSignature return ctx _hashes = None def _maybe_add_hash(tsig_alg, hash_alg): try: _hashes[tsig_alg] = dns.hash.get(hash_alg) except KeyError: pass def _setup_hashes(): global _hashes _hashes = {} _maybe_add_hash(HMAC_SHA224, 'SHA224') _maybe_add_hash(HMAC_SHA256, 'SHA256') _maybe_add_hash(HMAC_SHA384, 'SHA384') _maybe_add_hash(HMAC_SHA512, 'SHA512') _maybe_add_hash(HMAC_SHA1, 'SHA1') 
_maybe_add_hash(HMAC_MD5, 'MD5') def get_algorithm(algorithm): """Returns the wire format string and the hash module to use for the specified TSIG algorithm @rtype: (string, hash constructor) @raises NotImplementedError: I{algorithm} is not supported """ global _hashes if _hashes is None: _setup_hashes() if isinstance(algorithm, (str, unicode)): algorithm = dns.name.from_text(algorithm) if sys.hexversion < 0x02050200 and \ (algorithm == HMAC_SHA384 or algorithm == HMAC_SHA512): raise NotImplementedError("TSIG algorithm " + str(algorithm) + " requires Python 2.5.2 or later") try: return (algorithm.to_digestable(), _hashes[algorithm]) except KeyError: raise NotImplementedError("TSIG algorithm " + str(algorithm) + " is not supported")
bclau/nova
refs/heads/master
nova/tests/api/openstack/compute/contrib/test_quotas.py
8
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 OpenStack Foundation # Copyright 2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from lxml import etree import webob from nova.api.openstack.compute.contrib import quotas from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova import context as context_maker from nova import quota from nova import test from nova.tests.api.openstack import fakes def quota_set(id): return {'quota_set': {'id': id, 'metadata_items': 128, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'instances': 10, 'injected_files': 5, 'cores': 20, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100, 'injected_file_path_bytes': 255}} class QuotaSetsTest(test.TestCase): def setUp(self): super(QuotaSetsTest, self).setUp() self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager) self.controller = quotas.QuotaSetsController(self.ext_mgr) def test_format_quota_set(self): raw_quota_set = { 'instances': 10, 'cores': 20, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_path_bytes': 255, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100} quota_set = self.controller._format_quota_set('1234', raw_quota_set) qs = quota_set['quota_set'] self.assertEqual(qs['id'], '1234') self.assertEqual(qs['instances'], 10) 
self.assertEqual(qs['cores'], 20) self.assertEqual(qs['ram'], 51200) self.assertEqual(qs['floating_ips'], 10) self.assertEqual(qs['fixed_ips'], -1) self.assertEqual(qs['metadata_items'], 128) self.assertEqual(qs['injected_files'], 5) self.assertEqual(qs['injected_file_path_bytes'], 255) self.assertEqual(qs['injected_file_content_bytes'], 10240) self.assertEqual(qs['security_groups'], 10) self.assertEqual(qs['security_group_rules'], 20) self.assertEqual(qs['key_pairs'], 100) def test_quotas_defaults(self): uri = '/v2/fake_tenant/os-quota-sets/fake_tenant/defaults' req = fakes.HTTPRequest.blank(uri) res_dict = self.controller.defaults(req, 'fake_tenant') expected = {'quota_set': { 'id': 'fake_tenant', 'instances': 10, 'cores': 20, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_path_bytes': 255, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100}} self.assertEqual(res_dict, expected) def test_quotas_show_as_admin(self): self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234', use_admin_context=True) res_dict = self.controller.show(req, 1234) self.assertEqual(res_dict, quota_set('1234')) def test_quotas_show_as_unauthorized_user(self): self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234') self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, req, 1234) def test_quotas_update_as_admin(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() body = {'quota_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, 
'security_group_rules': 20, 'key_pairs': 100, 'fixed_ips': -1}} req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) res_dict = self.controller.update(req, 'update_me', body) self.assertEqual(res_dict, body) def test_quotas_update_zero_value_as_admin(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() body = {'quota_set': {'instances': 0, 'cores': 0, 'ram': 0, 'floating_ips': 0, 'fixed_ips': 0, 'metadata_items': 0, 'injected_files': 0, 'injected_file_content_bytes': 0, 'injected_file_path_bytes': 0, 'security_groups': 0, 'security_group_rules': 0, 'key_pairs': 100, 'fixed_ips': -1}} req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) res_dict = self.controller.update(req, 'update_me', body) self.assertEqual(res_dict, body) def test_quotas_update_as_user(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() body = {'quota_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100}} req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me') self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, req, 'update_me', body) def test_quotas_update_invalid_key(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() body = {'quota_set': {'instances2': -2, 'cores': -2, 'ram': -2, 'floating_ips': -2, 'metadata_items': -2, 'injected_files': -2, 'injected_file_content_bytes': -2}} req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, 
req, 'update_me', body) def test_quotas_update_invalid_limit(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() body = {'quota_set': {'instances': -2, 'cores': -2, 'ram': -2, 'floating_ips': -2, 'fixed_ips': -2, 'metadata_items': -2, 'injected_files': -2, 'injected_file_content_bytes': -2}} req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 'update_me', body) def test_quotas_update_invalid_value_json_fromat_empty_string(self): expected_resp = {'quota_set': { 'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100}} # when PUT JSON format with empty string for quota body = {'quota_set': {'instances': 50, 'cores': 50, 'ram': '', 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100}} req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() res_dict = self.controller.update(req, 'update_me', body) self.assertEqual(res_dict, expected_resp) def test_quotas_update_invalid_value_xml_fromat_empty_string(self): expected_resp = {'quota_set': { 'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100}} # when PUT XML format with 
empty string for quota body = {'quota_set': {'instances': 50, 'cores': 50, 'ram': {}, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100}} req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() res_dict = self.controller.update(req, 'update_me', body) self.assertEqual(res_dict, expected_resp) def test_quotas_update_invalid_value_non_int(self): # when PUT non integer value body = {'quota_set': {'instances': test, 'cores': 50, 'ram': {}, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100}} req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 'update_me', body) def test_delete_quotas_when_extension_not_loaded(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(False) self.mox.ReplayAll() req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1234) def test_quotas_delete_as_unauthorized_user(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.mox.ReplayAll() req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234') self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, req, 1234) def test_quotas_delete_as_admin(self): context = context_maker.get_admin_context() self.req = 
fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234') self.req.environ['nova.context'] = context self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.mox.StubOutWithMock(quota.QUOTAS, "destroy_all_by_project") quota.QUOTAS.destroy_all_by_project(context, 1234) self.mox.ReplayAll() res = self.controller.delete(self.req, 1234) self.mox.VerifyAll() self.assertEqual(res.status_int, 202) class QuotaXMLSerializerTest(test.TestCase): def setUp(self): super(QuotaXMLSerializerTest, self).setUp() self.serializer = quotas.QuotaTemplate() self.deserializer = wsgi.XMLDeserializer() def test_serializer(self): exemplar = dict(quota_set=dict( id='project_id', metadata_items=10, injected_file_path_bytes=255, injected_file_content_bytes=20, ram=50, floating_ips=60, fixed_ips=-1, instances=70, injected_files=80, security_groups=10, security_group_rules=20, key_pairs=100, cores=90)) text = self.serializer.serialize(exemplar) tree = etree.fromstring(text) self.assertEqual('quota_set', tree.tag) self.assertEqual('project_id', tree.get('id')) self.assertEqual(len(exemplar['quota_set']) - 1, len(tree)) for child in tree: self.assertTrue(child.tag in exemplar['quota_set']) self.assertEqual(int(child.text), exemplar['quota_set'][child.tag]) def test_deserializer(self): exemplar = dict(quota_set=dict( metadata_items='10', injected_file_content_bytes='20', ram='50', floating_ips='60', fixed_ips='-1', instances='70', injected_files='80', security_groups='10', security_group_rules='20', key_pairs='100', cores='90')) intext = ("<?xml version='1.0' encoding='UTF-8'?>\n" '<quota_set>' '<metadata_items>10</metadata_items>' '<injected_file_content_bytes>20' '</injected_file_content_bytes>' '<ram>50</ram>' '<floating_ips>60</floating_ips>' '<fixed_ips>-1</fixed_ips>' '<instances>70</instances>' '<injected_files>80</injected_files>' '<security_groups>10</security_groups>' '<security_group_rules>20</security_group_rules>' '<key_pairs>100</key_pairs>' '<cores>90</cores>' '</quota_set>') 
result = self.deserializer.deserialize(intext)['body'] self.assertEqual(result, exemplar) fake_quotas = {'ram': {'limit': 51200, 'in_use': 12800, 'reserved': 12800}, 'cores': {'limit': 20, 'in_use': 10, 'reserved': 5}, 'instances': {'limit': 100, 'in_use': 0, 'reserved': 0}} def fake_get_quotas(self, context, id, user_id=None, usages=False): if usages: return fake_quotas else: return dict((k, v['limit']) for k, v in fake_quotas.items()) class ExtendedQuotasTest(test.TestCase): def setUp(self): super(ExtendedQuotasTest, self).setUp() self.ext_mgr = self.mox.CreateMock(extensions.ExtensionManager) self.controller = quotas.QuotaSetsController(self.ext_mgr) def test_quotas_update_exceed_in_used(self): body = {'quota_set': {'cores': 10}} self.stubs.Set(quotas.QuotaSetsController, '_get_quotas', fake_get_quotas) req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 'update_me', body) def test_quotas_force_update_exceed_in_used(self): self.stubs.Set(quotas.QuotaSetsController, '_get_quotas', fake_get_quotas) req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) expected = {'quota_set': {'ram': 25600, 'instances': 200, 'cores': 10}} body = {'quota_set': {'ram': 25600, 'instances': 200, 'cores': 10, 'force': 'True'}} fake_quotas.get('ram')['limit'] = 25600 fake_quotas.get('cores')['limit'] = 10 fake_quotas.get('instances')['limit'] = 200 self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() res_dict = self.controller.update(req, 'update_me', body) self.assertEqual(res_dict, expected) class UserQuotasTest(test.TestCase): def setUp(self): super(UserQuotasTest, self).setUp() self.ext_mgr = 
self.mox.CreateMock(extensions.ExtensionManager) self.controller = quotas.QuotaSetsController(self.ext_mgr) def test_user_quotas_show_as_admin(self): self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1', use_admin_context=True) res_dict = self.controller.show(req, 1234) self.assertEqual(res_dict, quota_set('1234')) def test_user_quotas_show_as_unauthorized_user(self): self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1') self.assertRaises(webob.exc.HTTPForbidden, self.controller.show, req, 1234) def test_user_quotas_update_as_admin(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() body = {'quota_set': {'instances': 10, 'cores': 20, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100}} url = '/v2/fake4/os-quota-sets/update_me?user_id=1' req = fakes.HTTPRequest.blank(url, use_admin_context=True) res_dict = self.controller.update(req, 'update_me', body) self.assertEqual(res_dict, body) def test_user_quotas_update_as_user(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() body = {'quota_set': {'instances': 10, 'cores': 20, 'ram': 51200, 'floating_ips': 10, 'fixed_ips': -1, 'metadata_items': 128, 'injected_files': 5, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, 'key_pairs': 100}} url = '/v2/fake4/os-quota-sets/update_me?user_id=1' req = fakes.HTTPRequest.blank(url) self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, req, 'update_me', body) def 
test_user_quotas_update_exceed_project(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() body = {'quota_set': {'instances': 20}} url = '/v2/fake4/os-quota-sets/update_me?user_id=1' req = fakes.HTTPRequest.blank(url, use_admin_context=True) self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, req, 'update_me', body) def test_delete_user_quotas_when_extension_not_loaded(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(False) self.mox.ReplayAll() req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, req, 1234) def test_user_quotas_delete_as_unauthorized_user(self): self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.ReplayAll() req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/1234?user_id=1') self.assertRaises(webob.exc.HTTPForbidden, self.controller.delete, req, 1234) def test_user_quotas_delete_as_admin(self): context = context_maker.get_admin_context() url = '/v2/fake4/os-quota-sets/1234?user_id=1' self.req = fakes.HTTPRequest.blank(url) self.req.environ['nova.context'] = context self.ext_mgr.is_loaded('os-extended-quotas').AndReturn(True) self.ext_mgr.is_loaded('os-user-quotas').AndReturn(True) self.mox.StubOutWithMock(quota.QUOTAS, "destroy_all_by_project_and_user") quota.QUOTAS.destroy_all_by_project_and_user(context, 1234, '1') self.mox.ReplayAll() res = self.controller.delete(self.req, 1234) self.mox.VerifyAll() self.assertEqual(res.status_int, 202)
jedie/DragonPy
refs/heads/master
dragonpy/Dragon32/machine.py
1
#!/usr/bin/env python2 """ Dragon 32 ~~~~~~~~~ :created: 2014 by Jens Diemer - www.jensdiemer.de :copyleft: 2014 by the DragonPy team, see AUTHORS for more details. :license: GNU GPL v3 or above, see LICENSE for more details. """ import logging from dragonpy.core.gui import DragonTkinterGUI from dragonpy.core.machine import MachineGUI from dragonpy.Dragon32.config import Dragon32Cfg from dragonpy.Dragon32.periphery_dragon import Dragon32Periphery log = logging.getLogger(__name__) def run_Dragon32(cfg_dict): machine = MachineGUI( cfg=Dragon32Cfg(cfg_dict) ) machine.run( PeripheryClass=Dragon32Periphery, GUI_Class=DragonTkinterGUI ) # ------------------------------------------------------------------------------
sandeepdsouza93/TensorFlow-15712
refs/heads/master
tensorflow/python/framework/tensor_shape_test.py
79
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for shape inference helper classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import tensor_shape_pb2 from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.platform import googletest class DimensionTest(test_util.TensorFlowTestCase): def testDimension(self): dim = tensor_shape.Dimension(12) self.assertEqual(12, dim.value) self.assertEqual(12, int(dim)) self.assertEqual(dim, tensor_shape.Dimension(12)) self.assertEqual(tensor_shape.Dimension(15), dim + tensor_shape.Dimension(3)) self.assertEqual(tensor_shape.Dimension(15), dim + 3) self.assertEqual(tensor_shape.Dimension(24), dim * tensor_shape.Dimension(2)) self.assertEqual(tensor_shape.Dimension(24), dim * 2) self.assertEqual( tensor_shape.Dimension(6), dim // tensor_shape.Dimension(2)) self.assertEqual(tensor_shape.Dimension(6), dim // 2) self.assertEqual(tensor_shape.Dimension(12), dim.merge_with(tensor_shape.Dimension(12))) self.assertEqual(tensor_shape.Dimension(12), dim.merge_with(12)) self.assertLess(tensor_shape.Dimension(12), tensor_shape.Dimension(13)) self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12)) 
self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12)) self.assertLessEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(13)) self.assertGreater(tensor_shape.Dimension(13), tensor_shape.Dimension(12)) self.assertGreaterEqual(tensor_shape.Dimension(12), tensor_shape.Dimension(12)) self.assertGreaterEqual(tensor_shape.Dimension(13), tensor_shape.Dimension(12)) self.assertNotEqual(dim, (12,)) with self.assertRaises(ValueError): dim.merge_with(tensor_shape.Dimension(13)) def testUnknownDimension(self): dim = tensor_shape.Dimension(None) self.assertIs(None, dim.value) self.assertEqual(dim.value, tensor_shape.Dimension(None).value) self.assertEqual(tensor_shape.Dimension(None).value, (dim + tensor_shape.Dimension(None)).value) self.assertEqual(tensor_shape.Dimension(None).value, (dim * tensor_shape.Dimension(None)).value) self.assertEqual( tensor_shape.Dimension(None).value, (dim // tensor_shape.Dimension(None)).value) self.assertEqual(tensor_shape.Dimension(None).value, dim.merge_with(tensor_shape.Dimension(None)).value) self.assertIs(None, tensor_shape.Dimension(None) < tensor_shape.Dimension(None)) self.assertIs(None, tensor_shape.Dimension(None) <= tensor_shape.Dimension(None)) self.assertIs(None, tensor_shape.Dimension(None) > tensor_shape.Dimension(None)) self.assertIs(None, tensor_shape.Dimension(None) >= tensor_shape.Dimension(None)) def testKnownAndUnknownDimensions(self): known = tensor_shape.Dimension(12) unknown = tensor_shape.Dimension(None) self.assertEqual( tensor_shape.Dimension(None).value, (known + unknown).value) self.assertEqual( tensor_shape.Dimension(None).value, (unknown + known).value) self.assertEqual( tensor_shape.Dimension(None).value, (known * unknown).value) self.assertEqual( tensor_shape.Dimension(None).value, (unknown * known).value) self.assertEqual( tensor_shape.Dimension(None).value, (known // unknown).value) self.assertEqual( tensor_shape.Dimension(None).value, (unknown // known).value) self.assertEqual( 
tensor_shape.Dimension(12), known.merge_with(unknown)) self.assertEqual( tensor_shape.Dimension(12), unknown.merge_with(known)) self.assertIs(None, tensor_shape.Dimension(12) < tensor_shape.Dimension(None)) self.assertIs(None, tensor_shape.Dimension(12) <= tensor_shape.Dimension(None)) self.assertIs(None, tensor_shape.Dimension(12) > tensor_shape.Dimension(None)) self.assertIs(None, tensor_shape.Dimension(12) >= tensor_shape.Dimension(None)) self.assertIs(None, tensor_shape.Dimension(None) < tensor_shape.Dimension(12)) self.assertIs(None, tensor_shape.Dimension(None) <= tensor_shape.Dimension(12)) self.assertIs(None, tensor_shape.Dimension(None) > tensor_shape.Dimension(12)) self.assertIs(None, tensor_shape.Dimension(None) >= tensor_shape.Dimension(12)) def testAsDimension(self): self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(tensor_shape.Dimension(12))) self.assertEqual(tensor_shape.Dimension(12), tensor_shape.as_dimension(12)) self.assertEqual( tensor_shape.Dimension(None).value, tensor_shape.as_dimension(tensor_shape.Dimension(None)).value) self.assertEqual(tensor_shape.Dimension(None).value, tensor_shape.as_dimension(None).value) def testEquality(self): self.assertTrue(tensor_shape.Dimension(12) == tensor_shape.Dimension(12)) self.assertFalse(tensor_shape.Dimension(12) == tensor_shape.Dimension(13)) self.assertIs(None, tensor_shape.Dimension(12) == tensor_shape.Dimension(None)) self.assertIs(None, tensor_shape.Dimension(None) == tensor_shape.Dimension(12)) self.assertIs(None, tensor_shape.Dimension(None) == tensor_shape.Dimension(None)) self.assertTrue(tensor_shape.Dimension(12) == "12") self.assertTrue(tensor_shape.Dimension(12) == 24.0 / 2) # None indicates ambiguous comparison, but comparison vs the wrong type # is unambigously False. 
self.assertIsNotNone(tensor_shape.Dimension(12) == "_") self.assertIsNotNone(tensor_shape.Dimension(None) == 12.99) self.assertFalse(tensor_shape.Dimension(12) == "_") self.assertFalse(tensor_shape.Dimension(None) == 12.99) self.assertIs(None, tensor_shape.Dimension(None) == "13") self.assertIs(None, tensor_shape.Dimension(None) == None) # pylint: disable=g-equals-none self.assertFalse(tensor_shape.Dimension(12) == 12.99) def testInequality(self): self.assertTrue(tensor_shape.Dimension(12) != tensor_shape.Dimension(13)) self.assertFalse(tensor_shape.Dimension(12) != tensor_shape.Dimension(12)) self.assertIs(None, tensor_shape.Dimension(12) != tensor_shape.Dimension(None)) self.assertIs(None, tensor_shape.Dimension(None) != tensor_shape.Dimension(12)) self.assertIs(None, tensor_shape.Dimension(None) != tensor_shape.Dimension(None)) # None indicates ambiguous comparison, but comparison vs the wrong type # is unambigously False. self.assertIsNotNone(tensor_shape.Dimension(12) != "_") self.assertIsNotNone(tensor_shape.Dimension(None) != 12.99) self.assertTrue(tensor_shape.Dimension(12) != "_") self.assertTrue(tensor_shape.Dimension(None) != 12.99) self.assertIs(None, tensor_shape.Dimension(None) != "13") self.assertIs(None, tensor_shape.Dimension(None) != None) # pylint: disable=g-equals-none self.assertTrue(tensor_shape.Dimension(12) != 12.99) def testRepr(self): self.assertEqual(repr(tensor_shape.Dimension(7)), "Dimension(7)") self.assertEqual(repr(tensor_shape.Dimension(None)), "Dimension(None)") def testStr(self): self.assertEqual(str(tensor_shape.Dimension(7)), "7") self.assertEqual(str(tensor_shape.Dimension(None)), "?") class ShapeTest(test_util.TensorFlowTestCase): def testUnknownShape(self): s = tensor_shape.TensorShape(None) with self.assertRaises(ValueError): s.assert_is_fully_defined() self.assertIs(None, s.ndims) with self.assertRaises(ValueError): len(s) self.assertFalse(s) self.assertIs(None, s.dims) with self.assertRaises(ValueError): for _ in 
tensor_shape.TensorShape(None): pass def testFullyDefinedShape(self): s = tensor_shape.TensorShape([tensor_shape.Dimension( 3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)]) s.assert_is_fully_defined() self.assertEqual(3, s.ndims) self.assertEqual(3, len(s)) self.assertTrue(s) s.assert_has_rank(3) self.assertEqual([tensor_shape.Dimension(3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)], s.dims) self.assertEqual(tensor_shape.Dimension(3), s[0]) self.assertEqual(tensor_shape.Dimension(4), s[1]) self.assertEqual(tensor_shape.Dimension(7), s[2]) self.assertEqual([3, 4, 7], s.as_list()) s.assert_is_compatible_with([3, 4, 7]) s.assert_same_rank([6, 3, 7]) for d1, d2 in zip(s, [3, 4, 7]): assert d1.value == d2 def testPartiallyDefinedShape(self): s = tensor_shape.TensorShape([tensor_shape.Dimension( 3), tensor_shape.Dimension(None), tensor_shape.Dimension(7)]) with self.assertRaises(ValueError): s.assert_is_fully_defined() self.assertEqual(3, s.ndims) self.assertEqual(3, len(s)) self.assertTrue(s) s.assert_has_rank(3) self.assertEqual(tensor_shape.Dimension(3), s[0]) self.assertEqual(tensor_shape.Dimension(None).value, s[1].value) self.assertEqual(tensor_shape.Dimension(7), s[2]) s.assert_same_rank([6, 3, 7]) for d1, d2 in zip(s, [3, None, 7]): assert d1.value == d2 def testMergeFullShapes(self): self.assertEqual([3, 4, 7], tensor_shape.TensorShape([3, 4, 7]).merge_with( tensor_shape.TensorShape([3, 4, 7])).as_list()) with self.assertRaises(ValueError): tensor_shape.TensorShape([3, 4, 7]).merge_with( tensor_shape.TensorShape([6, 3, 7])) def testMergePartialShapes(self): s1 = tensor_shape.TensorShape([tensor_shape.Dimension( 3), tensor_shape.Dimension(None), tensor_shape.Dimension(7)]) s2 = tensor_shape.TensorShape([tensor_shape.Dimension( None), tensor_shape.Dimension(4), tensor_shape.Dimension(7)]) self.assertEqual([3, 4, 7], s1.merge_with(s2).as_list()) def testMergeFullAndUnknownShape(self): self.assertEqual([3, 4, 7], tensor_shape.TensorShape([3, 4, 
7]).merge_with( tensor_shape.TensorShape(None)).as_list()) def testSlice(self): known = tensor_shape.TensorShape([0, 1, 2, 3, 4]) self.assertEqual(tensor_shape.Dimension(2), known[2]) tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with(known[1:4]) unknown = tensor_shape.TensorShape(None) self.assertEqual(tensor_shape.Dimension(None).value, unknown[2].value) tensor_shape.TensorShape( [None, None, None]).assert_is_compatible_with(unknown[1:4]) def testConcatenate(self): tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with( tensor_shape.TensorShape([1, 2]).concatenate( tensor_shape.TensorShape([3, 4]))) tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with( tensor_shape.TensorShape([1, 2]).concatenate( tensor_shape.TensorShape(None))) tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with( tensor_shape.TensorShape(None).concatenate( tensor_shape.TensorShape([3, 4]))) tensor_shape.TensorShape([1, 2, 3, 4]).assert_is_compatible_with( tensor_shape.TensorShape(None).concatenate( tensor_shape.TensorShape(None))) tensor_shape.TensorShape([1, 2, 3]).assert_is_compatible_with( tensor_shape.TensorShape([1, 2]).concatenate( tensor_shape.Dimension(3))) def testHelpers(self): tensor_shape.TensorShape([]).assert_is_compatible_with( tensor_shape.scalar()) tensor_shape.TensorShape([37]).assert_is_compatible_with( tensor_shape.vector(37)) tensor_shape.TensorShape( [94, 43]).assert_is_compatible_with(tensor_shape.matrix(94, 43)) def testTruedivFails(self): unknown = tensor_shape.Dimension(None) self.assertEqual((unknown // unknown).value, None) with self.assertRaisesRegexp(TypeError, r"unsupported operand type"): unknown / unknown # pylint: disable=pointless-statement def testConvertFromProto(self): def make_tensor_shape_proto(shape): return tensor_shape_pb2.TensorShapeProto( dim=[tensor_shape_pb2.TensorShapeProto.Dim(size=x) for x in shape]) proto = make_tensor_shape_proto([]) self.assertEqual(tensor_shape.TensorShape([]), 
tensor_shape.TensorShape(proto)) self.assertEqual(tensor_shape.TensorShape([]), tensor_shape.as_shape(proto)) proto = make_tensor_shape_proto([1, 37, 42]) self.assertEqual(tensor_shape.TensorShape([1, 37, 42]), tensor_shape.TensorShape(proto)) self.assertEqual(tensor_shape.TensorShape([1, 37, 42]), tensor_shape.as_shape(proto)) partial_proto_shape = tensor_shape.as_shape( make_tensor_shape_proto([-1, 37, 42])) partial_shape = tensor_shape.TensorShape([None, 37, 42]) self.assertNotEqual(partial_proto_shape, partial_shape) self.assertEqual(partial_proto_shape[0].value, None) self.assertEqual(partial_proto_shape[1].value, 37) self.assertEqual(partial_proto_shape[2].value, 42) self.assertTrue(partial_shape.is_compatible_with(partial_proto_shape)) def testStr(self): self.assertEqual("<unknown>", str(tensor_shape.unknown_shape())) self.assertEqual("(?,)", str(tensor_shape.unknown_shape(ndims=1))) self.assertEqual("(?, ?)", str(tensor_shape.unknown_shape(ndims=2))) self.assertEqual("(?, ?, ?)", str(tensor_shape.unknown_shape(ndims=3))) self.assertEqual("()", str(tensor_shape.scalar())) self.assertEqual("(7,)", str(tensor_shape.vector(7))) self.assertEqual("(3, 8)", str(tensor_shape.matrix(3, 8))) self.assertEqual("(4, 5, 2)", str(tensor_shape.TensorShape([4, 5, 2]))) self.assertEqual("(32, ?, 1, 9)", str(tensor_shape.TensorShape([32, None, 1, 9]))) def testAsProto(self): self.assertTrue(tensor_shape.unknown_shape().as_proto().unknown_rank) self.assertFalse( tensor_shape.unknown_shape(ndims=3).as_proto().unknown_rank) self.assertFalse( tensor_shape.TensorShape([1, 2, 3]).as_proto().unknown_rank) self.assertFalse( tensor_shape.TensorShape([1, None, 3]).as_proto().unknown_rank) def testEquality(self): s1 = tensor_shape.TensorShape([tensor_shape.Dimension( 3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)]) s2 = tensor_shape.TensorShape([tensor_shape.Dimension( 3), tensor_shape.Dimension(4), tensor_shape.Dimension(7)]) s3 = 
tensor_shape.TensorShape([tensor_shape.Dimension(3), tensor_shape.Dimension(4), None]) self.assertTrue(s1 == s2) self.assertFalse(s1 != s2) self.assertFalse(s1 == "a string") self.assertTrue(s1 != "a string") self.assertNotEqual(s1, "347", "Should not equal an ambiguous string.") self.assertEqual(s1, ["3", "4", "7"]) # Test with an unknown shape in s3 self.assertTrue(s1 != s3) self.assertFalse(s3 == "a string") self.assertTrue(s3 != "a string") # eq and neq are not symmetric for unknown shapes. unk0 = tensor_shape.unknown_shape() self.assertFalse(unk0 == s1) self.assertFalse(s1 == unk0) with self.assertRaises(ValueError): unk0 != s1 # pylint: disable=pointless-statement with self.assertRaises(ValueError): s1 != unk0 # pylint: disable=pointless-statement unk1 = tensor_shape.unknown_shape() self.assertTrue(unk0 == unk1) self.assertTrue(unk1 == unk0) with self.assertRaises(ValueError): unk0 != unk1 # pylint: disable=pointless-statement with self.assertRaises(ValueError): unk1 != unk0 # pylint: disable=pointless-statement def testAsList(self): with self.assertRaisesRegexp(ValueError, "not defined on an unknown TensorShape"): tensor_shape.unknown_shape().as_list() self.assertAllEqual([None, None], tensor_shape.unknown_shape(2).as_list()) self.assertAllEqual([2, None, 4], tensor_shape.TensorShape( (2, None, 4)).as_list()) if __name__ == "__main__": googletest.main()
40223119/2015w11
refs/heads/master
static/Brython3.1.0-20150301-090019/Lib/unittest/test/testmock/testmagicmethods.py
737
import unittest import inspect import sys from unittest.mock import Mock, MagicMock, _magics class TestMockingMagicMethods(unittest.TestCase): def test_deleting_magic_methods(self): mock = Mock() self.assertFalse(hasattr(mock, '__getitem__')) mock.__getitem__ = Mock() self.assertTrue(hasattr(mock, '__getitem__')) del mock.__getitem__ self.assertFalse(hasattr(mock, '__getitem__')) def test_magicmock_del(self): mock = MagicMock() # before using getitem del mock.__getitem__ self.assertRaises(TypeError, lambda: mock['foo']) mock = MagicMock() # this time use it first mock['foo'] del mock.__getitem__ self.assertRaises(TypeError, lambda: mock['foo']) def test_magic_method_wrapping(self): mock = Mock() def f(self, name): return self, 'fish' mock.__getitem__ = f self.assertFalse(mock.__getitem__ is f) self.assertEqual(mock['foo'], (mock, 'fish')) self.assertEqual(mock.__getitem__('foo'), (mock, 'fish')) mock.__getitem__ = mock self.assertTrue(mock.__getitem__ is mock) def test_magic_methods_isolated_between_mocks(self): mock1 = Mock() mock2 = Mock() mock1.__iter__ = Mock(return_value=iter([])) self.assertEqual(list(mock1), []) self.assertRaises(TypeError, lambda: list(mock2)) def test_repr(self): mock = Mock() self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock)) mock.__repr__ = lambda s: 'foo' self.assertEqual(repr(mock), 'foo') def test_str(self): mock = Mock() self.assertEqual(str(mock), object.__str__(mock)) mock.__str__ = lambda s: 'foo' self.assertEqual(str(mock), 'foo') def test_dict_methods(self): mock = Mock() self.assertRaises(TypeError, lambda: mock['foo']) def _del(): del mock['foo'] def _set(): mock['foo'] = 3 self.assertRaises(TypeError, _del) self.assertRaises(TypeError, _set) _dict = {} def getitem(s, name): return _dict[name] def setitem(s, name, value): _dict[name] = value def delitem(s, name): del _dict[name] mock.__setitem__ = setitem mock.__getitem__ = getitem mock.__delitem__ = delitem self.assertRaises(KeyError, lambda: mock['foo']) mock['foo'] = 
def test_hash(self):
    """hash() on a plain Mock delegates to the real __hash__, and a
    user-assigned __hash__ function takes over."""
    mock = Mock()
    # test delegation
    self.assertEqual(hash(mock), Mock.__hash__(mock))

    def _hash(s):
        return 3
    # Assigning a plain function as the magic method reroutes hash().
    mock.__hash__ = _hash
    self.assertEqual(hash(mock), 3)
def test_len_contains_iter(self):
    """A plain Mock supports no container protocol until the magic
    methods are explicitly assigned."""
    mock = Mock()

    # Unconfigured Mock rejects len(), iter() and `in`.
    self.assertRaises(TypeError, len, mock)
    self.assertRaises(TypeError, iter, mock)
    self.assertRaises(TypeError, lambda: 'foo' in mock)

    mock.__len__ = lambda s: 6
    self.assertEqual(len(mock), 6)

    mock.__contains__ = lambda s, o: o == 3
    self.assertTrue(3 in mock)
    self.assertFalse(6 in mock)

    mock.__iter__ = lambda s: iter('foobarbaz')
    self.assertEqual(list(mock), list('foobarbaz'))
def test_magicmock_defaults(self):
    """MagicMock preconfigures sensible default return values for the
    numeric, container, hashing and string protocols."""
    mock = MagicMock()
    self.assertEqual(int(mock), 1)
    self.assertEqual(complex(mock), 1j)
    self.assertEqual(float(mock), 1.0)
    self.assertNotIn(object(), mock)
    self.assertEqual(len(mock), 0)
    self.assertEqual(list(mock), [])
    # hash/str fall back to the object defaults rather than mock values.
    self.assertEqual(hash(mock), object.__hash__(mock))
    self.assertEqual(str(mock), object.__str__(mock))
    self.assertTrue(bool(mock))

    # in Python 3 oct and hex use __index__
    # so these tests are for __index__ in py3k
    self.assertEqual(oct(mock), '0o1')
    self.assertEqual(hex(mock), '0x1')
    # how to test __sizeof__ ?
def test_setting_unsupported_magic_method(self):
    """Assigning a magic method MagicMock does not support
    (__setattr__) raises AttributeError with an explanatory message."""
    mock = MagicMock()

    def set_setattr():
        mock.__setattr__ = lambda self, name: None
    self.assertRaisesRegex(AttributeError,
        "Attempting to set unsupported magic method '__setattr__'.",
        set_setattr
    )
def test_iterable_as_iter_return_value(self):
    """__iter__.return_value accepts any iterable: a list is re-wrapped
    each call, whereas a one-shot iterator is exhausted after one pass."""
    m = MagicMock()
    m.__iter__.return_value = [1, 2, 3]
    self.assertEqual(list(m), [1, 2, 3])
    # A list return value can be iterated repeatedly.
    self.assertEqual(list(m), [1, 2, 3])

    m.__iter__.return_value = iter([4, 5, 6])
    self.assertEqual(list(m), [4, 5, 6])
    # An iterator return value is consumed: second pass is empty.
    self.assertEqual(list(m), [])
mparus/android_kernel_huawei_msm8916_g760
refs/heads/master
tools/perf/python/twatch.py
7370
#! /usr/bin/python # -*- python -*- # -*- coding: utf-8 -*- # twatch - Experimental use of the perf python interface # Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com> # # This application is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 2. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. import perf def main(): cpus = perf.cpu_map() threads = perf.thread_map() evsel = perf.evsel(task = 1, comm = 1, mmap = 0, wakeup_events = 1, watermark = 1, sample_id_all = 1, sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID) evsel.open(cpus = cpus, threads = threads); evlist = perf.evlist(cpus, threads) evlist.add(evsel) evlist.mmap() while True: evlist.poll(timeout = -1) for cpu in cpus: event = evlist.read_on_cpu(cpu) if not event: continue print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu, event.sample_pid, event.sample_tid), print event if __name__ == '__main__': main()
jwlawson/tensorflow
refs/heads/master
tensorflow/contrib/learn/python/learn/ops/ops_test.py
94
class OpsTest(test.TestCase):
  """Ops tests: exercises softmax_classifier, embedding_lookup and
  categorical_variable from tf.contrib.learn ops."""

  def test_softmax_classifier(self):
    """softmax_classifier returns a 2-class prediction and a scalar loss
    matching a precomputed reference value."""
    with self.test_session() as session:
      features = array_ops.placeholder(dtypes.float32, [None, 3])
      labels = array_ops.placeholder(dtypes.float32, [None, 2])
      weights = constant_op.constant([[0.1, 0.1], [0.1, 0.1], [0.1, 0.1]])
      biases = constant_op.constant([0.2, 0.3])
      class_weight = constant_op.constant([0.1, 0.9])
      prediction, loss = ops.softmax_classifier(features, labels, weights,
                                                biases, class_weight)
      self.assertEqual(prediction.get_shape()[1], 2)
      self.assertEqual(loss.get_shape(), [])
      value = session.run(loss, {features: [[0.2, 0.3, 0.2]],
                                 labels: [[0, 1]]})
      # Golden value for this fixed weight/bias/class_weight setup.
      self.assertAllClose(value, 0.55180627)

  def test_embedding_lookup(self):
    """embedding_lookup must agree with plain numpy fancy indexing
    (embeds[ids]) in both shape and values."""
    d_embed = 5
    n_embed = 10
    ids_shape = (2, 3, 4)
    embeds = np.random.randn(n_embed, d_embed)
    ids = np.random.randint(0, n_embed, ids_shape)
    with self.test_session():
      embed_np = embeds[ids]
      embed_tf = ops.embedding_lookup(embeds, ids).eval()
    self.assertEqual(embed_np.shape, embed_tf.shape)
    self.assertAllClose(embed_np, embed_tf)

  def test_categorical_variable(self):
    """categorical_variable produces one embedding row per index: the
    same ids fed in a transposed layout yield transposed embeddings."""
    random_seed.set_random_seed(42)
    with self.test_session() as sess:
      cat_var_idx = array_ops.placeholder(dtypes.int64, [2, 2])
      embeddings = ops.categorical_variable(
          cat_var_idx, n_classes=5, embedding_size=10, name="my_cat_var")
      sess.run(variables.global_variables_initializer())
      emb1 = sess.run(embeddings,
                      feed_dict={cat_var_idx.name: [[0, 1], [2, 3]]})
      emb2 = sess.run(embeddings,
                      feed_dict={cat_var_idx.name: [[0, 2], [1, 3]]})
    self.assertEqual(emb1.shape, emb2.shape)
    # emb2 fed the transposed index matrix, so transposing its first two
    # axes must recover emb1.
    self.assertAllEqual(np.transpose(emb2, axes=[1, 0, 2]), emb1)
noamkatzir/palm-hand-reading
refs/heads/master
tests/test8.py
1
def mapPalmAndFingers(contours, image):
    """Classify hand contours into palm and finger-end candidates.

    The largest-area contour is taken as the palm.  Every other contour
    gets a 'dist' (euclidean distance of its centroid to the palm
    centroid); contours are then visited farthest-first, and a contour is
    kept as a finger end if the straight line fitted through it crosses
    the filled palm mask.

    Returns (fingersEnd, palm): the list of kept contour dicts and the
    palm dict.  Each dict has keys 'center', 'contour', 'area', 'palm',
    'dist'.  Side effects: draws fit lines onto `image` and opens one
    matplotlib figure per contour.
    """
    handElements = []
    for i in xrange(len(contours)):
        # Centroid from image moments: (m10/m00, m01/m00).
        M = cv2.moments(contours[i])
        centroid_x = int(M['m10']/M['m00'])
        centroid_y = int(M['m01']/M['m00'])
        # cv2.circle(lefthand, (centroid_x, centroid_y), 10, (255, 0, 0),-1)
        element = {
            'center': (centroid_x, centroid_y),
            'contour': contours[i],
            'area': cv2.contourArea(contours[i]),
            'palm': False
        }
        handElements.append(element)
    # mapp the distance of hand elements from the palm
    palm = max(handElements, key=lambda x: x['area'])
    palm['palm'] = True
    palm['dist'] = 0
    for handElement in handElements:
        if handElement['palm'] is False:
            handElement['dist'] = linalg.norm(np.array(handElement['center']) - np.array(palm['center']))
    # sorting the hand helements by the distance from the palm
    # because this way we will find the end of the fingers
    handElements = sorted(handElements, key=itemgetter('dist'), reverse=True)
    rows,cols = image.shape[:2]
    fingersEnd = []
    # NOTE(review): this loop also visits the palm element itself (dist 0,
    # last in the sorted order); its fitted line always crosses its own
    # mask, so the palm appears to end up in fingersEnd too -- confirm
    # whether that is intended.
    for handElement in handElements:
        maskPalm = np.zeros(image.shape, np.uint8)
        maskLine = np.zeros(image.shape, np.uint8)
        # Filled palm silhouette with value 1, so maskLine * maskPalm is
        # nonzero exactly where the fitted line crosses the palm.
        cv2.drawContours(maskPalm, [palm['contour']], 0, 1, -1)
        [vx, vy, x, y] = cv2.fitLine(handElement['contour'], cv2.cv.CV_DIST_L2, 0, 0.01, 0.01)
        # Extend the fitted line to the left (x=0) and right (x=cols-1) edges.
        lefty = int((-x*vy/vx) + y)
        righty = int(((cols-x)*vy/vx)+y)
        cv2.line(maskLine, (cols-1, righty), (0, lefty), 1, 2)
        cv2.line(image, (cols-1, righty), (0, lefty), 1, 2)
        result = maskLine * maskPalm
        cv2.drawContours(maskPalm, [handElement['contour']], 0, 255, 1)
        cv2.drawContours(maskPalm, [palm['contour']], 0, 255, 1)
        # cv2.imshow('aaa'+int(handElement['area']),result)
        plt.figure(int(handElement['area']))
        plt.imshow((maskLine + maskPalm), cmap='gray')
        if np.sum(result) > 0:
            fingersEnd.append(handElement)
    # plt.figure(2)
    # plt.imshow(result, cmap='gray')
    # plt.show()
    return fingersEnd, palm
np.concatenate((elements[0]['contour'], elements[1]['contour']), axis=0) # ellipse = cv2.fitEllipse(largeCon) # cv2.ellipse(lefthand, ellipse, (255, 0, 0), 2) plt.figure(1) plt.imshow(mask, cmap='gray') plt.figure(2) plt.imshow(small_color, cmap='gray') plt.show()
xHeliotrope/injustice_dropper
refs/heads/master
env/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/sbcharsetprober.py
2926
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### import sys from . 
class SingleByteCharSetProber(CharSetProber):
    """Charset prober for single-byte encodings.

    Scores a byte stream against a language model: each byte is mapped to
    a frequency order, and pairs of consecutive in-sample orders are
    looked up in the model's precedence matrix to accumulate sequence
    likelihood counters.
    """

    def __init__(self, model, reversed=False, nameProber=None):
        CharSetProber.__init__(self)
        # Language model dict: charToOrderMap, precedenceMatrix,
        # mTypicalPositiveRatio, keepEnglishLetter, charsetName.
        self._mModel = model
        # TRUE if we need to reverse every pair in the model lookup
        self._mReversed = reversed
        # Optional auxiliary prober for name decision
        self._mNameProber = nameProber
        self.reset()

    def reset(self):
        """Reset all sequence/character counters to their initial state."""
        CharSetProber.reset(self)
        # char order of last character
        self._mLastOrder = 255
        # One counter per sequence likelihood category.
        self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
        self._mTotalSeqs = 0
        self._mTotalChar = 0
        # characters that fall in our sampling range
        self._mFreqChar = 0

    def get_charset_name(self):
        """Return the detected charset name, deferring to the name prober
        when one was supplied."""
        if self._mNameProber:
            return self._mNameProber.get_charset_name()
        else:
            return self._mModel['charsetName']

    def feed(self, aBuf):
        """Consume a chunk of bytes, update the counters, and return the
        detection state (eDetecting / eFoundIt / eNotMe)."""
        if not self._mModel['keepEnglishLetter']:
            aBuf = self.filter_without_english_letters(aBuf)
        aLen = len(aBuf)
        if not aLen:
            return self.get_state()
        for c in aBuf:
            order = self._mModel['charToOrderMap'][wrap_ord(c)]
            # Orders below SYMBOL_CAT_ORDER are letters (not symbols).
            if order < SYMBOL_CAT_ORDER:
                self._mTotalChar += 1
            if order < SAMPLE_SIZE:
                self._mFreqChar += 1
                # Both this char and the previous one are in the sampled
                # range: score the pair via the precedence matrix.
                if self._mLastOrder < SAMPLE_SIZE:
                    self._mTotalSeqs += 1
                    if not self._mReversed:
                        i = (self._mLastOrder * SAMPLE_SIZE) + order
                        model = self._mModel['precedenceMatrix'][i]
                    else:  # reverse the order of the letters in the lookup
                        i = (order * SAMPLE_SIZE) + self._mLastOrder
                        model = self._mModel['precedenceMatrix'][i]
                    self._mSeqCounters[model] += 1
            self._mLastOrder = order

        if self.get_state() == constants.eDetecting:
            # Enough evidence collected: take a confidence shortcut.
            if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
                cf = self.get_confidence()
                if cf > POSITIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a'
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
                    self._mState = constants.eFoundIt
                elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
                    if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative'
                                         'shortcut threshhold %s\n' %
                                         (self._mModel['charsetName'], cf,
                                          NEGATIVE_SHORTCUT_THRESHOLD))
                    self._mState = constants.eNotMe

        return self.get_state()

    def get_confidence(self):
        """Return a confidence score in [0.01, 0.99]: the ratio of
        positive-category sequences, normalized by the model's typical
        positive ratio and the in-sample character fraction."""
        r = 0.01
        if self._mTotalSeqs > 0:
            r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
                 / self._mModel['mTypicalPositiveRatio'])
            r = r * self._mFreqChar / self._mTotalChar
        if r >= 1.0:
            r = 0.99
        return r
bfirsh/needle
refs/heads/master
tests/__init__.py
4
class ImageTestCaseMixin(object):
    """Mixin providing small PIL test images and image/HTML helpers for
    screenshot-comparison test cases."""

    def get_image(self, colour):
        # 100x100 RGB canvas filled with the given colour tuple.
        return Image.new('RGB', (100, 100), colour)

    def get_black_image(self):
        return self.get_image((0, 0, 0))

    def get_white_image(self):
        return self.get_image((255, 255, 255))

    def get_half_filled_image(self):
        # Black image whose left half (x in [0, 49]) is painted white.
        im = self.get_black_image()
        draw = ImageDraw.Draw(im)
        draw.rectangle(
            ((0, 0), (49, 100)),
            fill=(255, 255, 255)
        )
        return im

    def load_black_div(self, text=''):
        # Render a fixed 100x100 black div (with optional inner text)
        # through the test driver.
        # NOTE(review): assumes self.driver (with a load_html method) is
        # supplied by the concrete test case -- confirm.
        self.driver.load_html('''
            <style type="text/css">
                #black-box {
                    position: absolute;
                    left: 50px;
                    top: 100px;
                    width: 100px;
                    height: 100px;
                    background-color: black;
                    color: white;
                }
            </style>
            <div id="black-box">%s</div>
        ''' % text)

    def save_image_to_fh(self, im):
        # Serialize the image as PNG into an in-memory file object,
        # rewound to position 0 so callers can read it immediately.
        fh = IOClass()
        im.save(fh, 'PNG')
        fh.seek(0)
        return fh
ahmetyazar/adj-demo
refs/heads/master
fulfillment/urllib3/util/ssl_.py
87
from __future__ import absolute_import import errno import warnings import hmac from binascii import hexlify, unhexlify from hashlib import md5, sha1, sha256 from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning SSLContext = None HAS_SNI = False IS_PYOPENSSL = False IS_SECURETRANSPORT = False # Maps the length of a digest to a possible hash function producing this digest HASHFUNC_MAP = { 32: md5, 40: sha1, 64: sha256, } def _const_compare_digest_backport(a, b): """ Compare two digests of equal length in constant time. The digests must be of type str/bytes. Returns True if the digests match, and False otherwise. """ result = abs(len(a) - len(b)) for l, r in zip(bytearray(a), bytearray(b)): result |= l ^ r return result == 0 _const_compare_digest = getattr(hmac, 'compare_digest', _const_compare_digest_backport) try: # Test for SSL features import ssl from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 from ssl import HAS_SNI # Has SNI? except ImportError: pass try: from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION except ImportError: OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000 OP_NO_COMPRESSION = 0x20000 # A secure default. # Sources for more information on TLS ciphers: # # - https://wiki.mozilla.org/Security/Server_Side_TLS # - https://www.ssllabs.com/projects/best-practices/index.html # - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ # # The general intent is: # - Prefer TLS 1.3 cipher suites # - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE), # - prefer ECDHE over DHE for better performance, # - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and # security, # - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common, # - disable NULL authentication, MD5 MACs and DSS for security reasons. 
DEFAULT_CIPHERS = ':'.join([ 'TLS13-AES-256-GCM-SHA384', 'TLS13-CHACHA20-POLY1305-SHA256', 'TLS13-AES-128-GCM-SHA256', 'ECDH+AESGCM', 'ECDH+CHACHA20', 'DH+AESGCM', 'DH+CHACHA20', 'ECDH+AES256', 'DH+AES256', 'ECDH+AES128', 'DH+AES', 'RSA+AESGCM', 'RSA+AES', '!aNULL', '!eNULL', '!MD5', ]) try: from ssl import SSLContext # Modern SSL? except ImportError: import sys class SSLContext(object): # Platform-specific: Python 2 & 3.1 supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or (3, 2) <= sys.version_info) def __init__(self, protocol_version): self.protocol = protocol_version # Use default values from a real SSLContext self.check_hostname = False self.verify_mode = ssl.CERT_NONE self.ca_certs = None self.options = 0 self.certfile = None self.keyfile = None self.ciphers = None def load_cert_chain(self, certfile, keyfile): self.certfile = certfile self.keyfile = keyfile def load_verify_locations(self, cafile=None, capath=None): self.ca_certs = cafile if capath is not None: raise SSLError("CA directories not supported in older Pythons") def set_ciphers(self, cipher_suite): if not self.supports_set_ciphers: raise TypeError( 'Your version of Python does not support setting ' 'a custom cipher suite. Please upgrade to Python ' '2.7, 3.2, or later if you need this functionality.' ) self.ciphers = cipher_suite def wrap_socket(self, socket, server_hostname=None, server_side=False): warnings.warn( 'A true SSLContext object is not available. This prevents ' 'urllib3 from configuring SSL appropriately and may cause ' 'certain SSL connections to fail. You can upgrade to a newer ' 'version of Python to solve this. 
For more information, see ' 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' '#ssl-warnings', InsecurePlatformWarning ) kwargs = { 'keyfile': self.keyfile, 'certfile': self.certfile, 'ca_certs': self.ca_certs, 'cert_reqs': self.verify_mode, 'ssl_version': self.protocol, 'server_side': server_side, } if self.supports_set_ciphers: # Platform-specific: Python 2.7+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs) else: # Platform-specific: Python 2.6 return wrap_socket(socket, **kwargs) def assert_fingerprint(cert, fingerprint): """ Checks if given fingerprint matches the supplied certificate. :param cert: Certificate as bytes object. :param fingerprint: Fingerprint as string of hexdigits, can be interspersed by colons. """ fingerprint = fingerprint.replace(':', '').lower() digest_length = len(fingerprint) hashfunc = HASHFUNC_MAP.get(digest_length) if not hashfunc: raise SSLError( 'Fingerprint of invalid length: {0}'.format(fingerprint)) # We need encode() here for py32; works on py2 and p33. fingerprint_bytes = unhexlify(fingerprint.encode()) cert_digest = hashfunc(cert).digest() if not _const_compare_digest(cert_digest, fingerprint_bytes): raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' .format(fingerprint, hexlify(cert_digest))) def resolve_cert_reqs(candidate): """ Resolves the argument to a numeric constant, which can be passed to the wrap_socket function/method from the ssl module. Defaults to :data:`ssl.CERT_NONE`. If given a string it is assumed to be the name of the constant in the :mod:`ssl` module or its abbrevation. (So you can specify `REQUIRED` instead of `CERT_REQUIRED`. If it's neither `None` nor a string we assume it is already the numeric constant which can directly be passed to wrap_socket. 
""" if candidate is None: return CERT_NONE if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'CERT_' + candidate) return res return candidate def resolve_ssl_version(candidate): """ like resolve_cert_reqs """ if candidate is None: return PROTOCOL_SSLv23 if isinstance(candidate, str): res = getattr(ssl, candidate, None) if res is None: res = getattr(ssl, 'PROTOCOL_' + candidate) return res return candidate def create_urllib3_context(ssl_version=None, cert_reqs=None, options=None, ciphers=None): """All arguments have the same meaning as ``ssl_wrap_socket``. By default, this function does a lot of the same work that ``ssl.create_default_context`` does on Python 3.4+. It: - Disables SSLv2, SSLv3, and compression - Sets a restricted set of server ciphers If you wish to enable SSLv3, you can do:: from urllib3.util import ssl_ context = ssl_.create_urllib3_context() context.options &= ~ssl_.OP_NO_SSLv3 You can do the same to enable compression (substituting ``COMPRESSION`` for ``SSLv3`` in the last line above). :param ssl_version: The desired protocol version to use. This will default to PROTOCOL_SSLv23 which will negotiate the highest protocol that both the server and your installation of OpenSSL support. :param cert_reqs: Whether to require the certificate verification. This defaults to ``ssl.CERT_REQUIRED``. :param options: Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``, ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``. :param ciphers: Which cipher suites to allow the server to select. 
:returns: Constructed SSLContext object with specified options :rtype: SSLContext """ context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23) # Setting the default here, as we may have no ssl module on import cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs if options is None: options = 0 # SSLv2 is easily broken and is considered harmful and dangerous options |= OP_NO_SSLv2 # SSLv3 has several problems and is now dangerous options |= OP_NO_SSLv3 # Disable compression to prevent CRIME attacks for OpenSSL 1.0+ # (issue #309) options |= OP_NO_COMPRESSION context.options |= options if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6 context.set_ciphers(ciphers or DEFAULT_CIPHERS) context.verify_mode = cert_reqs if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2 # We do our own verification, including fingerprints and alternative # hostnames. So disable it here context.check_hostname = False return context def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, ca_certs=None, server_hostname=None, ssl_version=None, ciphers=None, ssl_context=None, ca_cert_dir=None): """ All arguments except for server_hostname, ssl_context, and ca_cert_dir have the same meaning as they do when using :func:`ssl.wrap_socket`. :param server_hostname: When SNI is supported, the expected hostname of the certificate :param ssl_context: A pre-made :class:`SSLContext` object. If none is provided, one will be created using :func:`create_urllib3_context`. :param ciphers: A string of ciphers we wish the client to support. This is not supported on Python 2.6 as the ssl module does not support it. :param ca_cert_dir: A directory containing CA certificates in multiple separate files, as supported by OpenSSL's -CApath flag or the capath argument to SSLContext.load_verify_locations(). 
""" context = ssl_context if context is None: # Note: This branch of code and all the variables in it are no longer # used by urllib3 itself. We should consider deprecating and removing # this code. context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers) if ca_certs or ca_cert_dir: try: context.load_verify_locations(ca_certs, ca_cert_dir) except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2 raise SSLError(e) # Py33 raises FileNotFoundError which subclasses OSError # These are not equivalent unless we check the errno attribute except OSError as e: # Platform-specific: Python 3.3 and beyond if e.errno == errno.ENOENT: raise SSLError(e) raise elif getattr(context, 'load_default_certs', None) is not None: # try to load OS default certs; works well on Windows (require Python3.4+) context.load_default_certs() if certfile: context.load_cert_chain(certfile, keyfile) if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI return context.wrap_socket(sock, server_hostname=server_hostname) warnings.warn( 'An HTTPS request has been made, but the SNI (Subject Name ' 'Indication) extension to TLS is not available on this platform. ' 'This may cause the server to present an incorrect TLS ' 'certificate, which can cause validation failures. You can upgrade to ' 'a newer version of Python to solve this. For more information, see ' 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html' '#ssl-warnings', SNIMissingWarning ) return context.wrap_socket(sock)
trishnaguha/ansible
refs/heads/devel
lib/ansible/modules/crypto/acme/acme_certificate_revoke.py
9
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2016 Michael Gruener <michael.gruener@chaosmoon.net> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: acme_certificate_revoke author: "Felix Fontein (@felixfontein)" version_added: "2.7" short_description: Revoke certificates with the ACME protocol description: - "Allows to revoke certificates issued by a CA supporting the L(ACME protocol,https://tools.ietf.org/html/draft-ietf-acme-acme-18), such as L(Let's Encrypt,https://letsencrypt.org/)." notes: - "Exactly one of C(account_key_src), C(account_key_content), C(private_key_src) or C(private_key_content) must be specified." - "Trying to revoke an already revoked certificate should result in an unchanged status, even if the revocation reason was different than the one specified here. Also, depending on the server, it can happen that some other error is returned if the certificate has already been revoked." seealso: - name: The Let's Encrypt documentation description: Documentation for the Let's Encrypt Certification Authority. Provides useful information for example on rate limits. link: https://letsencrypt.org/docs/ - name: Automatic Certificate Management Environment (ACME) description: The current draft specification of the ACME protocol. link: https://tools.ietf.org/html/draft-ietf-acme-acme-18 - module: acme_inspect description: Allows to debug problems. extends_documentation_fragment: - acme options: certificate: description: - "Path to the certificate to revoke." required: yes account_key_src: description: - "Path to a file containing the ACME account RSA or Elliptic Curve key." - "RSA keys can be created with C(openssl rsa ...). Elliptic curve keys can be created with C(openssl ecparam -genkey ...). 
Any other tool creating private keys in PEM format can be used as well." - "Mutually exclusive with C(account_key_content)." - "Required if C(account_key_content) is not used." account_key_content: description: - "Content of the ACME account RSA or Elliptic Curve key." - "Note that exactly one of C(account_key_src), C(account_key_content), C(private_key_src) or C(private_key_content) must be specified." - "I(Warning): the content will be written into a temporary file, which will be deleted by Ansible when the module completes. Since this is an important private key — it can be used to change the account key, or to revoke your certificates without knowing their private keys —, this might not be acceptable." - "In case C(cryptography) is used, the content is not written into a temporary file. It can still happen that it is written to disk by Ansible in the process of moving the module with its argument to the node where it is executed." private_key_src: description: - "Path to the certificate's private key." - "Note that exactly one of C(account_key_src), C(account_key_content), C(private_key_src) or C(private_key_content) must be specified." private_key_content: description: - "Content of the certificate's private key." - "Note that exactly one of C(account_key_src), C(account_key_content), C(private_key_src) or C(private_key_content) must be specified." - "I(Warning): the content will be written into a temporary file, which will be deleted by Ansible when the module completes. Since this is an important private key — it can be used to change the account key, or to revoke your certificates without knowing their private keys —, this might not be acceptable." - "In case C(cryptography) is used, the content is not written into a temporary file. It can still happen that it is written to disk by Ansible in the process of moving the module with its argument to the node where it is executed." 
revoke_reason: description: - "One of the revocation reasonCodes defined in L(https://tools.ietf.org/html/rfc5280#section-5.3.1, Section 5.3.1 of RFC5280)." - "Possible values are C(0) (unspecified), C(1) (keyCompromise), C(2) (cACompromise), C(3) (affiliationChanged), C(4) (superseded), C(5) (cessationOfOperation), C(6) (certificateHold), C(8) (removeFromCRL), C(9) (privilegeWithdrawn), C(10) (aACompromise)" ''' EXAMPLES = ''' - name: Revoke certificate with account key acme_certificate_revoke: account_key_src: /etc/pki/cert/private/account.key certificate: /etc/httpd/ssl/sample.com.crt - name: Revoke certificate with certificate's private key acme_certificate_revoke: private_key_src: /etc/httpd/ssl/sample.com.key certificate: /etc/httpd/ssl/sample.com.crt ''' RETURN = ''' ''' from ansible.module_utils.acme import ( ModuleFailException, ACMEAccount, nopad_b64, pem_to_der, set_crypto_backend, ) from ansible.module_utils.basic import AnsibleModule def main(): module = AnsibleModule( argument_spec=dict( account_key_src=dict(type='path', aliases=['account_key']), account_key_content=dict(type='str', no_log=True), account_uri=dict(required=False, type='str'), acme_directory=dict(required=False, default='https://acme-staging.api.letsencrypt.org/directory', type='str'), acme_version=dict(required=False, default=1, choices=[1, 2], type='int'), validate_certs=dict(required=False, default=True, type='bool'), private_key_src=dict(type='path'), private_key_content=dict(type='str', no_log=True), certificate=dict(required=True, type='path'), revoke_reason=dict(required=False, type='int'), select_crypto_backend=dict(required=False, choices=['auto', 'openssl', 'cryptography'], default='auto', type='str'), ), required_one_of=( ['account_key_src', 'account_key_content', 'private_key_src', 'private_key_content'], ), mutually_exclusive=( ['account_key_src', 'account_key_content', 'private_key_src', 'private_key_content'], ), supports_check_mode=False, ) set_crypto_backend(module) if 
not module.params.get('validate_certs'): module.warn(warning='Disabling certificate validation for communications with ACME endpoint. ' + 'This should only be done for testing against a local ACME server for ' + 'development purposes, but *never* for production purposes.') try: account = ACMEAccount(module) # Load certificate certificate = pem_to_der(module.params.get('certificate')) certificate = nopad_b64(certificate) # Construct payload payload = { 'certificate': certificate } if module.params.get('revoke_reason') is not None: payload['reason'] = module.params.get('revoke_reason') # Determine endpoint if module.params.get('acme_version') == 1: endpoint = account.directory['revoke-cert'] payload['resource'] = 'revoke-cert' else: endpoint = account.directory['revokeCert'] # Get hold of private key (if available) and make sure it comes from disk private_key = module.params.get('private_key_src') private_key_content = module.params.get('private_key_content') # Revoke certificate if private_key or private_key_content: # Step 1: load and parse private key error, private_key_data = account.parse_key(private_key, private_key_content) if error: raise ModuleFailException("error while parsing private key: %s" % error) # Step 2: sign revokation request with private key jws_header = { "alg": private_key_data['alg'], "jwk": private_key_data['jwk'], } result, info = account.send_signed_request(endpoint, payload, key_data=private_key_data, jws_header=jws_header) else: # Step 1: get hold of account URI created, account_data = account.setup_account(allow_creation=False) if created: raise AssertionError('Unwanted account creation') if account_data is None: raise ModuleFailException(msg='Account does not exist or is deactivated.') # Step 2: sign revokation request with account key result, info = account.send_signed_request(endpoint, payload) if info['status'] != 200: already_revoked = False # Standarized error from draft 14 on 
(https://tools.ietf.org/html/draft-ietf-acme-acme-18#section-7.6) if result.get('type') == 'urn:ietf:params:acme:error:alreadyRevoked': already_revoked = True else: # Hack for Boulder errors if module.params.get('acme_version') == 1: error_type = 'urn:acme:error:malformed' else: error_type = 'urn:ietf:params:acme:error:malformed' if result.get('type') == error_type and result.get('detail') == 'Certificate already revoked': # Fallback: boulder returns this in case the certificate was already revoked. already_revoked = True # If we know the certificate was already revoked, we don't fail, # but successfully terminate while indicating no change if already_revoked: module.exit_json(changed=False) raise ModuleFailException('Error revoking certificate: {0} {1}'.format(info['status'], result)) module.exit_json(changed=True) except ModuleFailException as e: e.do_fail(module) if __name__ == '__main__': main()
sbidoul/buildbot
refs/heads/master
master/buildbot/test/unit/test_steps_source_bzr.py
9
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members from __future__ import absolute_import from __future__ import print_function import os from twisted.internet import error from twisted.python.reflect import namedModule from twisted.trial import unittest from buildbot.process import remotetransfer from buildbot.process.results import FAILURE from buildbot.process.results import RETRY from buildbot.process.results import SUCCESS from buildbot.steps.source import bzr from buildbot.test.fake.remotecommand import Expect from buildbot.test.fake.remotecommand import ExpectRemoteRef from buildbot.test.fake.remotecommand import ExpectShell from buildbot.test.util import sourcesteps class TestBzr(sourcesteps.SourceStepMixin, unittest.TestCase): def setUp(self): return self.setUpSourceStep() def tearDown(self): return self.tearDownSourceStep() def test_mode_full(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='fresh')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'clean-tree', '--force']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update']) + 0, 
ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS, state_string="update") self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_win32path(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='fresh')) self.build.path_module = namedModule('ntpath') self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file=r'wkdir\.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file=r'wkdir\.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'clean-tree', '--force']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) return self.runStep() def test_mode_full_timeout(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='fresh', timeout=1)) self.expectCommands( ExpectShell(workdir='wkdir', timeout=1, command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', timeout=1, command=['bzr', 'clean-tree', '--force']) + 0, ExpectShell(workdir='wkdir', timeout=1, command=['bzr', 'update']) + 0, ExpectShell(workdir='wkdir', timeout=1, command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_revision(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='fresh'), 
args=dict(revision='3730')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'clean-tree', '--force']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update', '-r', '3730']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_clean(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='clean')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'clean-tree', '--ignored', '--force']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_clean_patched(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='clean')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 0, # clean up the applied patch ExpectShell(workdir='wkdir', command=['bzr', 'clean-tree', '--ignored', '--force']) + 0, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, # this clean is from 'mode=clean' ExpectShell(workdir='wkdir', command=['bzr', 'clean-tree', 
'--ignored', '--force']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_clean_patch(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='clean'), patch=(1, 'patch')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'clean-tree', '--ignored', '--force']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update']) + 0, Expect('downloadFile', dict(blocksize=16384, maxsize=None, reader=ExpectRemoteRef( remotetransfer.FileReader), workerdest='.buildbot-diff', workdir='wkdir', mode=None)) + 0, Expect('downloadFile', dict(blocksize=16384, maxsize=None, reader=ExpectRemoteRef( remotetransfer.FileReader), workerdest='.buildbot-patched', workdir='wkdir', mode=None)) + 0, ExpectShell(workdir='wkdir', command=['patch', '-p1', '--remove-empty-files', '--force', '--forward', '-i', '.buildbot-diff']) + 0, Expect('rmdir', dict(dir='wkdir/.buildbot-diff', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_clean_patch_worker_2_16(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='clean'), patch=(1, 'patch'), worker_version={'*': '2.16'}) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', 
dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'clean-tree', '--ignored', '--force']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update']) + 0, Expect('downloadFile', dict(blocksize=16384, maxsize=None, reader=ExpectRemoteRef( remotetransfer.FileReader), slavedest='.buildbot-diff', workdir='wkdir', mode=None)) + 0, Expect('downloadFile', dict(blocksize=16384, maxsize=None, reader=ExpectRemoteRef( remotetransfer.FileReader), slavedest='.buildbot-patched', workdir='wkdir', mode=None)) + 0, ExpectShell(workdir='wkdir', command=['patch', '-p1', '--remove-empty-files', '--force', '--forward', '-i', '.buildbot-diff']) + 0, Expect('rmdir', dict(dir='wkdir/.buildbot-diff', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_clean_revision(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='clean'), args=dict(revision='2345')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'clean-tree', '--ignored', '--force']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update', '-r', '2345']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_fresh(self): self.setupStep( 
bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='fresh')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'clean-tree', '--force']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_clobber(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='clobber')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('rmdir', dict(dir='wkdir', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', 'http://bzr.squid-cache.org/bzr/squid3/trunk', '.']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_clobber_retry(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='clobber', retry=(0, 2))) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('rmdir', dict(dir='wkdir', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', 'http://bzr.squid-cache.org/bzr/squid3/trunk', '.']) + 1, Expect('rmdir', dict(dir='wkdir', logEnviron=True)) + 0, 
ExpectShell(workdir='wkdir', command=['bzr', 'checkout', 'http://bzr.squid-cache.org/bzr/squid3/trunk', '.']) + 1, Expect('rmdir', dict(dir='wkdir', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', 'http://bzr.squid-cache.org/bzr/squid3/trunk', '.']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_clobber_revision(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='clobber'), args=dict(revision='3730')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('rmdir', dict(dir='wkdir', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', 'http://bzr.squid-cache.org/bzr/squid3/trunk', '.', '-r', '3730']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_clobber_baseurl(self): self.setupStep( bzr.Bzr(baseURL='http://bzr.squid-cache.org/bzr/squid3', defaultBranch='trunk', mode='full', method='clobber')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('rmdir', dict(dir='wkdir', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', os.path.join('http://bzr.squid-cache.org/bzr/squid3', 'trunk'), '.']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) 
self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_clobber_baseurl_nodefault(self): self.setupStep( bzr.Bzr(baseURL='http://bzr.squid-cache.org/bzr/squid3', defaultBranch='trunk', mode='full', method='clobber'), args=dict(branch='branches/SQUID_3_0')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('rmdir', dict(dir='wkdir', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', os.path.join('http://bzr.squid-cache.org/bzr/squid3', 'branches/SQUID_3_0'), '.']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_full_copy(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='copy')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('rmdir', dict(dir='build', logEnviron=True)) + 0, Expect('stat', dict(file='source/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='source', command=['bzr', 'update']) + 0, Expect('cpdir', {'fromdir': 'source', 'logEnviron': True, 'todir': 'build'}) + 0, ExpectShell(workdir='source', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_incremental(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='incremental')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', 
dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_incremental_revision(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='incremental'), args=dict(revision='9384')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'update', '-r', '9384']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100', 'Bzr') return self.runStep() def test_mode_incremental_no_existing_repo(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='incremental')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 1, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', 'http://bzr.squid-cache.org/bzr/squid3/trunk', '.']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100\n') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100\n', 'Bzr') return self.runStep() def test_mode_incremental_retry(self): self.setupStep( 
bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='incremental', retry=(0, 1))) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 1, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', 'http://bzr.squid-cache.org/bzr/squid3/trunk', '.']) + 1, Expect('rmdir', dict(dir='wkdir', logEnviron=True)) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', 'http://bzr.squid-cache.org/bzr/squid3/trunk', '.']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='100\n') + 0, ) self.expectOutcome(result=SUCCESS) self.expectProperty('got_revision', '100\n', 'Bzr') return self.runStep() def test_bad_revparse(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='incremental')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 1, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', 'http://bzr.squid-cache.org/bzr/squid3/trunk', '.']) + 0, ExpectShell(workdir='wkdir', command=['bzr', 'version-info', '--custom', "--template='{revno}"]) + ExpectShell.log('stdio', stdout='oiasdfj010laksjfd') + 0, ) self.expectOutcome(result=FAILURE) return self.runStep() def test_bad_checkout(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='incremental')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + 0, Expect('stat', dict(file='wkdir/.buildbot-patched', logEnviron=True)) + 1, Expect('stat', dict(file='wkdir/.bzr', logEnviron=True)) + 1, ExpectShell(workdir='wkdir', command=['bzr', 'checkout', 'http://bzr.squid-cache.org/bzr/squid3/trunk', 
'.']) + ExpectShell.log('stdio', stderr='failed\n') + 128, ) self.expectOutcome(result=FAILURE) return self.runStep() def test_worker_connection_lost(self): self.setupStep( bzr.Bzr(repourl='http://bzr.squid-cache.org/bzr/squid3/trunk', mode='full', method='fresh')) self.expectCommands( ExpectShell(workdir='wkdir', command=['bzr', '--version']) + ('err', error.ConnectionLost()), ) self.expectOutcome(result=RETRY, state_string="update (retry)") return self.runStep()
emptyewer/DEEPN
refs/heads/master
listen.py
1
import socket client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) while 1: try: client_socket.connect(("localhost", 1599)) data = raw_input ( "Enter text to be upper-cased, q to quit\n" ) client_socket.send(data) if ( data == 'q' or data == 'Q'): client_socket.close() break; else: data = client_socket.recv(5000) print "Your upper cased text: " , data client_socket.close() except: pass
tiramiseb/awesomeshop
refs/heads/master
back/shop/__init__.py
2
# -*- coding: utf8 -*- # Copyright 2015 Sébastien Maccagnoni # # This file is part of AwesomeShop. # # AwesomeShop is free software: you can redistribute it and/or modify it under # the terms of the GNU Affero General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # AwesomeShop is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more # details. # # You should have received a copy of the GNU Affero General Public License # along with AwesomeShop. If not, see <http://www.gnu.org/licenses/>.
odooindia/odoo
refs/heads/master
addons/marketing_campaign/__openerp__.py
67
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Marketing Campaigns', 'version': '1.1', 'depends': ['marketing', 'document', 'email_template', 'decimal_precision' ], 'author': 'OpenERP SA', 'category': 'Marketing', 'description': """ This module provides leads automation through marketing campaigns (campaigns can in fact be defined on any resource, not just CRM Leads). ========================================================================================================================================= The campaigns are dynamic and multi-channels. The process is as follows: ------------------------------------------------------------------------ * Design marketing campaigns like workflows, including email templates to send, reports to print and send by email, custom actions * Define input segments that will select the items that should enter the campaign (e.g leads from certain countries.) 
* Run you campaign in simulation mode to test it real-time or accelerated, and fine-tune it * You may also start the real campaign in manual mode, where each action requires manual validation * Finally launch your campaign live, and watch the statistics as the campaign does everything fully automatically. While the campaign runs you can of course continue to fine-tune the parameters, input segments, workflow. **Note:** If you need demo data, you can install the marketing_campaign_crm_demo module, but this will also install the CRM application as it depends on CRM Leads. """, 'website': 'http://www.openerp.com', 'data': [ 'marketing_campaign_view.xml', 'marketing_campaign_data.xml', 'marketing_campaign_workflow.xml', 'report/campaign_analysis_view.xml', 'security/marketing_campaign_security.xml', 'security/ir.model.access.csv' ], 'demo': ['marketing_campaign_demo.xml'], 'test': ['test/marketing_campaign.yml'], 'installable': True, 'auto_install': False, 'images': ['images/campaign.png', 'images/campaigns.jpeg','images/email_account.jpeg','images/email_templates.jpeg','images/segments.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
adiog/dotfiles
refs/heads/master
.bin/fastcc.py
1
#!/usr/bin/python3 import os from re import search from fileinput import input stl = { 'algorithm': ['max', 'min', 'sort'], 'numeric': ['accumulate'], 'array': ['array'], 'bitset': ['bitset'], 'iostream': ['cin', 'cout'], 'map': ['map'], 'memory': ['make_shared', 'make_unique', 'shared_ptr', 'unique_ptr'], 'queue': ['queue', 'priority_queue'], 'stack': ['stack'], 'utility': ['move', 'pair'], 'vector': ['vector'], 'regex': ['regex'], 'set': ['set'], 'unordered_set': ['unordered_set'], } cinvector = """ template <typename T> istream &operator>>(istream &is, vector<T> &v) { for (auto &e : v) { is >> e; } return is; }""" cinarray = """ template <typename T> istream &operator>>(istream &is, array<T> &a) { for (auto &e : a) { is >> e; } return is; }""" if __name__ == '__main__': content = '' for line in input(): content = content + line use_stl = {} for lib in stl: use_lib = False for keyword in stl[lib]: if search(keyword, content): use_lib = True break if use_lib: print("#include <{}>".format(lib)) use_stl[lib] = use_lib print("using namespace std;") if use_stl['iostream'] and use_stl['vector']: print(cinvector) if use_stl['iostream'] and use_stl['array']: print(cinarray) print("int main() {") print(content) print("return 0;") print("}")
rlucioni/rotations
refs/heads/master
rotations/tests/conftest.py
1
from pytest_factoryboy import register from rotations.tests.factories import MemberFactory, RotationFactory register(MemberFactory) register(RotationFactory)
jaywreddy/django
refs/heads/master
django/contrib/auth/handlers/modwsgi.py
537
from django import db from django.contrib import auth from django.utils.encoding import force_bytes def check_password(environ, username, password): """ Authenticates against Django's auth database mod_wsgi docs specify None, True, False as return value depending on whether the user exists and authenticates. """ UserModel = auth.get_user_model() # db connection state is managed similarly to the wsgi handler # as mod_wsgi may call these functions outside of a request/response cycle db.reset_queries() try: try: user = UserModel._default_manager.get_by_natural_key(username) except UserModel.DoesNotExist: return None if not user.is_active: return None return user.check_password(password) finally: db.close_old_connections() def groups_for_user(environ, username): """ Authorizes a user based on groups """ UserModel = auth.get_user_model() db.reset_queries() try: try: user = UserModel._default_manager.get_by_natural_key(username) except UserModel.DoesNotExist: return [] if not user.is_active: return [] return [force_bytes(group.name) for group in user.groups.all()] finally: db.close_old_connections()
jylaxp/django
refs/heads/master
tests/template_tests/filter_tests/test_escapejs.py
324
from __future__ import unicode_literals from django.template.defaultfilters import escapejs_filter from django.test import SimpleTestCase from ..utils import setup class EscapejsTests(SimpleTestCase): @setup({'escapejs01': '{{ a|escapejs }}'}) def test_escapejs01(self): output = self.engine.render_to_string('escapejs01', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}) self.assertEqual(output, 'testing\\u000D\\u000Ajavascript ' '\\u0027string\\u0022 \\u003Cb\\u003E' 'escaping\\u003C/b\\u003E') @setup({'escapejs02': '{% autoescape off %}{{ a|escapejs }}{% endautoescape %}'}) def test_escapejs02(self): output = self.engine.render_to_string('escapejs02', {'a': 'testing\r\njavascript \'string" <b>escaping</b>'}) self.assertEqual(output, 'testing\\u000D\\u000Ajavascript ' '\\u0027string\\u0022 \\u003Cb\\u003E' 'escaping\\u003C/b\\u003E') class FunctionTests(SimpleTestCase): def test_quotes(self): self.assertEqual( escapejs_filter('"double quotes" and \'single quotes\''), '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027', ) def test_backslashes(self): self.assertEqual(escapejs_filter(r'\ : backslashes, too'), '\\u005C : backslashes, too') def test_whitespace(self): self.assertEqual( escapejs_filter('and lots of whitespace: \r\n\t\v\f\b'), 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008', ) def test_script(self): self.assertEqual( escapejs_filter(r'<script>and this</script>'), '\\u003Cscript\\u003Eand this\\u003C/script\\u003E', ) def test_paragraph_separator(self): self.assertEqual( escapejs_filter('paragraph separator:\u2029and line separator:\u2028'), 'paragraph separator:\\u2029and line separator:\\u2028', )
Manojkumar91/odoo_inresto
refs/heads/master
addons/base_vat/base_vat.py
2
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import logging import string import datetime import re _logger = logging.getLogger(__name__) try: import vatnumber except ImportError: _logger.warning("VAT validation partially unavailable because the `vatnumber` Python library cannot be found. 
" "Install it to support more countries, for example with `easy_install vatnumber`.") vatnumber = None from openerp.osv import fields, osv from openerp.tools.misc import ustr from openerp.tools.translate import _ from openerp.exceptions import UserError _ref_vat = { 'at': 'ATU12345675', 'be': 'BE0477472701', 'bg': 'BG1234567892', 'ch': 'CHE-123.456.788 TVA or CH TVA 123456', #Swiss by Yannick Vaucher @ Camptocamp 'cy': 'CY12345678F', 'cz': 'CZ12345679', 'de': 'DE123456788', 'dk': 'DK12345674', 'ee': 'EE123456780', 'el': 'EL12345670', 'es': 'ESA12345674', 'fi': 'FI12345671', 'fr': 'FR32123456789', 'gb': 'GB123456782', 'gr': 'GR12345670', 'hu': 'HU12345676', 'hr': 'HR01234567896', # Croatia, contributed by Milan Tribuson 'ie': 'IE1234567FA', 'it': 'IT12345670017', 'lt': 'LT123456715', 'lu': 'LU12345613', 'lv': 'LV41234567891', 'mt': 'MT12345634', 'mx': 'MXABC123456T1B', 'nl': 'NL123456782B90', 'no': 'NO123456785', 'pe': 'PER10254824220 or PED10254824220', 'pl': 'PL1234567883', 'pt': 'PT123456789', 'ro': 'RO1234567897', 'se': 'SE123456789701', 'si': 'SI12345679', 'sk': 'SK0012345675', 'tr': 'TR1234567890 (VERGINO) veya TR12345678901 (TCKIMLIKNO)' # Levent Karakas @ Eska Yazilim A.S. } class res_partner(osv.osv): _inherit = 'res.partner' def _split_vat(self, vat): vat_country, vat_number = vat[:2].lower(), vat[2:].replace(' ', '') return vat_country, vat_number def simple_vat_check(self, cr, uid, country_code, vat_number, context=None): ''' Check the VAT number depending of the country. 
http://sima-pc.com/nif.php ''' if not ustr(country_code).encode('utf-8').isalpha(): return False check_func_name = 'check_vat_' + country_code check_func = getattr(self, check_func_name, None) or \ getattr(vatnumber, check_func_name, None) if not check_func: # No VAT validation available, default to check that the country code exists if country_code.upper() == 'EU': # Foreign companies that trade with non-enterprises in the EU # may have a VATIN starting with "EU" instead of a country code. return True res_country = self.pool.get('res.country') return bool(res_country.search(cr, uid, [('code', '=ilike', country_code)], context=context)) return check_func(vat_number) def vies_vat_check(self, cr, uid, country_code, vat_number, context=None): try: # Validate against VAT Information Exchange System (VIES) # see also http://ec.europa.eu/taxation_customs/vies/ return vatnumber.check_vies(country_code.upper()+vat_number) except Exception: # see http://ec.europa.eu/taxation_customs/vies/checkVatService.wsdl # Fault code may contain INVALID_INPUT, SERVICE_UNAVAILABLE, MS_UNAVAILABLE, # TIMEOUT or SERVER_BUSY. There is no way we can validate the input # with VIES if any of these arise, including the first one (it means invalid # country code or empty VAT number), so we fall back to the simple check. 
return self.simple_vat_check(cr, uid, country_code, vat_number, context=context) def button_check_vat(self, cr, uid, ids, context=None): if not self.check_vat(cr, uid, ids, context=context): msg = self._construct_constraint_msg(cr, uid, ids, context=context) raise UserError(msg) return True def check_vat(self, cr, uid, ids, context=None): user_company = self.pool.get('res.users').browse(cr, uid, uid).company_id if user_company.vat_check_vies: # force full VIES online check check_func = self.vies_vat_check else: # quick and partial off-line checksum validation check_func = self.simple_vat_check for partner in self.browse(cr, uid, ids, context=context): if not partner.vat: continue vat_country, vat_number = self._split_vat(partner.vat) if not check_func(cr, uid, vat_country, vat_number, context=context): _logger.info(_("Importing VAT Number [%s] is not valid !" % vat_number)) return False return True def vat_change(self, cr, uid, ids, value, context=None): return {'value': {'vat_subjected': bool(value)}} def _commercial_fields(self, cr, uid, context=None): return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['vat_subjected'] def _construct_constraint_msg(self, cr, uid, ids, context=None): def default_vat_check(cn, vn): # by default, a VAT number is valid if: # it starts with 2 letters # has more than 3 characters return cn[0] in string.ascii_lowercase and cn[1] in string.ascii_lowercase vat_country, vat_number = self._split_vat(self.browse(cr, uid, ids)[0].vat) vat_no = "'CC##' (CC=Country Code, ##=VAT Number)" error_partner = self.browse(cr, uid, ids, context=context) if default_vat_check(vat_country, vat_number): vat_no = _ref_vat[vat_country] if vat_country in _ref_vat else vat_no if self.pool['res.users'].browse(cr, uid, uid).company_id.vat_check_vies: return '\n' + _('The VAT number [%s] for partner [%s] either failed the VIES VAT validation check or did not respect the expected format %s.') % (error_partner[0].vat, 
error_partner[0].name, vat_no) return '\n' + _('The VAT number [%s] for partner [%s] does not seem to be valid. \nNote: the expected format is %s') % (error_partner[0].vat, error_partner[0].name, vat_no) _constraints = [(check_vat, _construct_constraint_msg, ["vat"])] __check_vat_ch_re1 = re.compile(r'(MWST|TVA|IVA)[0-9]{6}$') __check_vat_ch_re2 = re.compile(r'E([0-9]{9}|-[0-9]{3}\.[0-9]{3}\.[0-9]{3})(MWST|TVA|IVA)$') def check_vat_ch(self, vat): ''' Check Switzerland VAT number. ''' # VAT number in Switzerland will change between 2011 and 2013 # http://www.estv.admin.ch/mwst/themen/00154/00589/01107/index.html?lang=fr # Old format is "TVA 123456" we will admit the user has to enter ch before the number # Format will becomes such as "CHE-999.999.99C TVA" # Both old and new format will be accepted till end of 2013 # Accepted format are: (spaces are ignored) # CH TVA ###### # CH IVA ###### # CH MWST ####### # # CHE#########MWST # CHE#########TVA # CHE#########IVA # CHE-###.###.### MWST # CHE-###.###.### TVA # CHE-###.###.### IVA # if self.__check_vat_ch_re1.match(vat): return True match = self.__check_vat_ch_re2.match(vat) if match: # For new TVA numbers, do a mod11 check num = filter(lambda s: s.isdigit(), match.group(1)) # get the digits only factor = (5,4,3,2,7,6,5,4) csum = sum([int(num[i]) * factor[i] for i in range(8)]) check = (11 - (csum % 11)) % 11 return check == int(num[8]) return False def _ie_check_char(self, vat): vat = vat.zfill(8) extra = 0 if vat[7] not in ' W': if vat[7].isalpha(): extra = 9 * (ord(vat[7]) - 64) else: # invalid return -1 checksum = extra + sum((8-i) * int(x) for i, x in enumerate(vat[:7])) return 'WABCDEFGHIJKLMNOPQRSTUV'[checksum % 23] def check_vat_ie(self, vat): """ Temporary Ireland VAT validation to support the new format introduced in January 2013 in Ireland, until upstream is fixed. 
TODO: remove when fixed upstream""" if len(vat) not in (8, 9) or not vat[2:7].isdigit(): return False if len(vat) == 8: # Normalize pre-2013 numbers: final space or 'W' not significant vat += ' ' if vat[:7].isdigit(): return vat[7] == self._ie_check_char(vat[:7] + vat[8]) elif vat[1] in (string.ascii_uppercase + '+*'): # Deprecated format # See http://www.revenue.ie/en/online/third-party-reporting/reporting-payment-details/faqs.html#section3 return vat[7] == self._ie_check_char(vat[2:7] + vat[0] + vat[8]) return False # Mexican VAT verification, contributed by Vauxoo # and Panos Christeas <p_christ@hol.gr> __check_vat_mx_re = re.compile(r"(?P<primeras>[A-Za-z\xd1\xf1&]{3,4})" \ r"[ \-_]?" \ r"(?P<ano>[0-9]{2})(?P<mes>[01][0-9])(?P<dia>[0-3][0-9])" \ r"[ \-_]?" \ r"(?P<code>[A-Za-z0-9&\xd1\xf1]{3})$") def check_vat_mx(self, vat): ''' Mexican VAT verification Verificar RFC México ''' # we convert to 8-bit encoding, to help the regex parse only bytes vat = ustr(vat).encode('iso8859-1') m = self.__check_vat_mx_re.match(vat) if not m: #No valid format return False try: ano = int(m.group('ano')) if ano > 30: ano = 1900 + ano else: ano = 2000 + ano datetime.date(ano, int(m.group('mes')), int(m.group('dia'))) except ValueError: return False #Valid format and valid date return True # Norway VAT validation, contributed by Rolv Råen (adEgo) <rora@adego.no> def check_vat_no(self, vat): ''' Check Norway VAT number.See http://www.brreg.no/english/coordination/number.html ''' if len(vat) != 9: return False try: int(vat) except ValueError: return False sum = (3 * int(vat[0])) + (2 * int(vat[1])) + \ (7 * int(vat[2])) + (6 * int(vat[3])) + \ (5 * int(vat[4])) + (4 * int(vat[5])) + \ (3 * int(vat[6])) + (2 * int(vat[7])) check = 11 -(sum % 11) if check == 11: check = 0 if check == 10: # 10 is not a valid check digit for an organization number return False return check == int(vat[8]) # Peruvian VAT validation, contributed by Vauxoo def check_vat_pe(self, vat): vat_type,vat = vat and 
len(vat)>=2 and (vat[0], vat[1:]) or (False, False) if vat_type and vat_type.upper() == 'D': #DNI return True elif vat_type and vat_type.upper() == 'R': #verify RUC factor = '5432765432' sum = 0 dig_check = False if len(vat) != 11: return False try: int(vat) except ValueError: return False for f in range(0,10): sum += int(factor[f]) * int(vat[f]) subtraction = 11 - (sum % 11) if subtraction == 10: dig_check = 0 elif subtraction == 11: dig_check = 1 else: dig_check = subtraction return int(vat[10]) == dig_check else: return False # VAT validation in Turkey, contributed by # Levent Karakas @ Eska Yazilim A.S. def check_vat_tr(self, vat): if not (10 <= len(vat) <= 11): return False try: int(vat) except ValueError: return False # check vat number (vergi no) if len(vat) == 10: sum = 0 check = 0 for f in range(0,9): c1 = (int(vat[f]) + (9-f)) % 10 c2 = ( c1 * (2 ** (9-f)) ) % 9 if (c1 != 0) and (c2 == 0): c2 = 9 sum += c2 if sum % 10 == 0: check = 0 else: check = 10 - (sum % 10) return int(vat[9]) == check # check personal id (tc kimlik no) if len(vat) == 11: c1a = 0 c1b = 0 c2 = 0 for f in range(0,9,2): c1a += int(vat[f]) for f in range(1,9,2): c1b += int(vat[f]) c1 = ( (7 * c1a) - c1b) % 10 for f in range(0,10): c2 += int(vat[f]) c2 = c2 % 10 return int(vat[9]) == c1 and int(vat[10]) == c2 return False
webknjaz/routes
refs/heads/main
tests/test_units/test_base.py
5
import unittest from routes import request_config, _RequestConfig from routes.base import Route class TestBase(unittest.TestCase): def test_route(self): route = Route(None, ':controller/:action/:id') assert not route.static def test_request_config(self): orig_config = request_config() class Obby(object): pass myobj = Obby() class MyCallable(object): def __init__(self): class Obby(object): pass self.obj = myobj def __call__(self): return self.obj mycall = MyCallable() if hasattr(orig_config, 'using_request_local'): orig_config.request_local = mycall config = request_config() assert id(myobj) == id(config) old_config = request_config(original=True) assert issubclass(old_config.__class__, _RequestConfig) is True del orig_config.request_local if __name__ == '__main__': unittest.main()
pythonprobr/tekton
refs/heads/master
backend/apps/produto_app/produto_facade.py
1
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from produto_app.produto_commands import ListProdutoCommand, UpdateProdutoCommand, ProdutoForm,\ GetProdutoCommand, ListarProdutosPorCategoria, SalvarProdutoAtreladoACategoria, \ DeletarProdutosEArcoParaCategoria def save_produto_cmd(categoria, **produto_properties): """ Command to save Produto entity :param produto_properties: a dict of properties to save on model :return: a Command that save Produto, validating and localizing properties received as strings """ return SalvarProdutoAtreladoACategoria(categoria, **produto_properties) def update_produto_cmd(produto_id, **produto_properties): """ Command to update Produto entity with id equals 'produto_id' :param produto_properties: a dict of properties to update model :return: a Command that update Produto, validating and localizing properties received as strings """ return UpdateProdutoCommand(produto_id, **produto_properties) def list_produtos_cmd(): """ Command to list Produto entities ordered by their creation dates :return: a Command proceed the db operations when executed """ return ListProdutoCommand() def produto_form(**kwargs): """ Function to get Produto's detail form. :param kwargs: form properties :return: Form """ return ProdutoForm(**kwargs) def get_produto_cmd(produto_id): """ Find produto by her id :param produto_id: the produto id :return: Command """ return GetProdutoCommand(produto_id) def delete_produto_cmd(produto_id): """ Construct a command to delete a Produto :param produto_id: produto's id :return: Command """ return DeletarProdutosEArcoParaCategoria(produto_id) def listar_produtos_por_categoria_cmd(categoria): return ListarProdutosPorCategoria(categoria)
ceesprojectofficial/rest_api
refs/heads/master
requesthandler.py
2
""" File: requesthandler.py Author: Fernando Crespo Gravalos (cees.project.official@gmail.com) Date: 2014/6/18 """ """ These functions provide request handling, leaving the views receive requests and sending real HTTP CEES responses. """ import ceesvalidator as cv import ceesdbwrapper as cdbw import constants as c import logmessages as lm import pushnotification as pn from ceesloggers import getCeesAppLogger applogger = getCeesAppLogger() #################### # Common functions # #################### def getToken(request): """ Checks the Authentication header containing the token. """ try: tokenId = request.META[c.AUTHENTICATION] #request.META[c.AUTHENTICATION] except KeyError as re: applogger.exception(lm.MISSING_TOKEN + '\n' + str(re)) return (c.UNAUTHORIZED, '') token = cdbw.getToken(tokenId) if token == c.OBJECT_NOT_FOUND: applogger.warning(lm.TOKEN_NOT_FOUND) return (c.UNAUTHORIZED, '') elif token == c.DB_ERROR: applogger.error(lm.DB_ERROR) return (c.INTERNAL_SERVER_ERROR, '') return (c.OK, token) ############################################# # Functions related to login: login, logout # ############################################# def login(request): """ Login shop assistants. If email and password matches returns a token. """ data = request.DATA # Parsing request. If it is malformed, Django will return HTTP 400 automatically. validation_result = cv.CeesValidator().validate(data, c.LOGIN) # Validating request against schema. if validation_result == c.VALID_SUCC: # Validation successful. Extracting data. email = data.get(c.EMAIL, False) password = data.get(c.PASSWORD, False) macAddress = data.get(c.MAC_ADDRESS, False) auth = cdbw.checkLoginCredentials(email, password, macAddress) # Checking credentials. if auth == c.OBJECT_NOT_FOUND: # Credentials not found. Returns HTTP 401. applogger.warning(lm.CREDENTIALS_NOT_FOUND) return (c.UNAUTHORIZED, '') elif auth == c.DB_ERROR: # Could not persist token. Returns HTTP 500. 
applogger.error(lm.DB_ERROR) return (c.INTERNAL_SERVER_ERROR, '') applogger.info(lm.LOGGED_IN + email) return (c.CREATED, auth) # Ok. Returns HTTP 201. elif validation_result == c.IOERR: # IOError. Returns HTTP 500. applogger.error(lm.SCHEMA_NOT_FOUND) return (c.INTERNAL_SERVER_ERROR, '') applogger.error(lm.VALIDATION_ERROR) return (c.BAD_REQUEST, '') # Validation Error. Returns HTTP 400. def logout(request): """ Deletes the token from database causing the log-out. """ try: if cdbw.deleteToken(request.META[c.AUTHENTICATION]) == c.SUCC_QUERY: return c.OK # Ok. Returns HTTP 200. else: applogger.error(lm.DB_ERROR) return c.INTERNAL_SERVER_ERROR # Database error. Returns HTTP 500. except KeyError as re: # If no Authentication header found, returns HTTP 401. applogger(lm.MISSING_TOKEN) return c.UNAUTHORIZED ####################################################################### # Functions related to check-in process: getStores, checkin, checkout # ####################################################################### def getStores(request): """ Given the token obtained during the login retrieves the sotores available for the linked shop assistant. """ (status, token) = getToken(request) # Check the token in the Authentication header. if status != c.OK: return (status, '') tokenId = token.id # If the token is present and valid, get the linked shop assistant. sa = cdbw.getShopAssistant(tokenId) if sa == c.DB_ERROR: applogger(lm.DB_ERROR) return (c.INTERNAL_SERVER_ERROR, '') customer = cdbw.getCustomer(sa.id) if customer == c.DB_ERROR: applogger(lm.DB_ERROR) return (c.INTERNAL_SERVER_ERROR, '') stores = cdbw.getStores(customer.id) # If no error, get the customer linked to a shop assistant. if stores == c.OBJECT_NOT_FOUND: applogger.warning(lm.STORE_NOT_FOUND) return (c.NOT_FOUND, '') elif stores == c.DB_ERROR: applogger.error(lm.DB_ERROR) return (c.INTERNAL_SERVER_ERROR, '') return (c.OK, stores) # If no error, return the store list linked to a customer. 
def checkin(request): """ Given a store (city, address), this function will persist the shop assistant and the store in database. """ (status, token) = getToken(request) # Check the token in the Authentication header. if status != c.OK: return status tokenId = token.id # If the token is present and valid, get the linked shop assistant. sa = cdbw.getShopAssistant(tokenId) device = cdbw.getDevice(tokenId) if sa == c.DB_ERROR or device == c.DB_ERROR: # If there is no shop assistant or device linked to the token it's a database error. return c.INTERNAL_SERVER_ERROR registration = cdbw.getRegistrationId(device) if registration == c.OBJECT_NOT_FOUND: return c.NOT_FOUND elif registration == c.DB_ERROR: return c.INTERNAL_SERVER_ERROR data = request.DATA # Parsing request. If it is malformed, Django will return HTTP 400 automatically. validationResult = cv.CeesValidator().validate(data, c.CHECKIN) # Validating request against schema. if validationResult == c.VALID_SUCC: # Validation successful. Extracting data. city = data.get(c.CITY) address = data.get(c.ADDRESS) store = cdbw.getStore(city, address) if store == c.OBJECT_NOT_FOUND: # Very rare. This is checking that the store was deleted after the login but before the checkin. return c.NOT_FOUND elif store == c.DB_ERROR: return c.INTERNAL_SERVER_ERROR else: if cdbw.checkIn(token, registration, store) != c.SUCC_QUERY: return c.INTERNAL_SERVER_ERROR return c.CREATED # Checkin persisted. HTTP 201. return c.BAD_REQUEST # Validation Error. Returns HTTP 400. def checkout(request): """ This function removes the entry in check_ins for the shop assistant linked to the token in the request. """ (status, token) = getToken(request) # Check the token in the Authentication header. if status != c.OK: return status if cdbw.checkOut(token) == c.SUCC_QUERY: # Deleted entry in check-ins table. 
return c.OK return c.INTERNAL_SERVER_ERROR ################################################################## # Function related to client arrivals: newArrival, getClientInfo # ################################################################## def getArrivalsInfo(request): """ Returns the clients who are waiting to be attended by a shop assistant. """ (status, token) = getToken(request) # Check the token in the Authentication header. if status != c.OK: return (status, '') store = cdbw.getStoreFromCheckIn(token) if store == c.DB_ERROR: applogger.info(lm.DB_ERROR) return (c.INTERNAL_SERVER_ERROR, '') clients = cdbw.getAwaitingClients(store) if clients == c.OBJECT_NOT_FOUND: return (c.NOT_FOUND, '') elif clients == c.DB_ERROR: applogger(lm.DB_ERROR) return (c.INTERNAL_SERVER_ERROR, '') return (c.OK, clients) def newArrival(request): """ Stores a new arrival and send notification to shop assistants who have checked-in the store linked to the given request. """ data = request.DATA # Parsing request. If it is malformed, Django will return HTTP 400 automatically. validationResult = cv.CeesValidator().validate(data, c.DETECT) # Validating request against schema. if validationResult == c.VALID_SUCC: # Validation successful. Extracting data. customerId = data.get(c.CUSTOMERID, False) storeId = data.get(c.STOREID, False) rfid = data.get(c.RFID, False) client = cdbw.getClientFromRFID(rfid) if client == c.OBJECT_NOT_FOUND: applogger.warning(lm.RFID_NOT_FOUND) return c.NOT_FOUND elif client == c.DB_ERROR: applogger.error(lm.DB_ERROR) return c.INTERNAL_SERVER_ERROR if client.customer.id != customerId: # Client who enter in a store which is owned by a different customer. 
applogger.error(lm.CLIENT_NOT_ALLOWED) return c.FORBIDDEN store = cdbw.getStoreFromId(storeId) if store == c.OBJECT_NOT_FOUND: applogger.warning(lm.STORE_NOT_FOUND) return c.NOT_FOUND elif store == c.DB_ERROR: applogger.error(lm.DB_ERROR) return c.INTERNAL_SERVER_ERROR if cdbw.saveArrival(client, store) == c.DB_ERROR: # Everything OK. Save arrival. applogger.error(lm.DB_ERROR) return c.INTERNAL_SERVER_ERROR registrationIds = cdbw.getRegistrationIds(store) if registrationIds != c.OBJECT_NOT_FOUND and registrationIds != c.DB_ERROR and registrationIds != []: applogger.info(lm.SENDING_NOTIFICATION) pn.sendNotification(registrationIds, client) return c.CREATED return c.BAD_REQUEST # Validation Error. Returns HTTP 400. def updateArrivalStatus(request): (status, token) = getToken(request) # Check the token in the Authentication header. if status != c.OK: return status data = request.DATA clientId = data.get('client_id') status = data.get('status') result = cdbw.updateArrivalStatus(clientId, status) if result == c.OBJECT_NOT_FOUND: applogger.warning(lm.CLIENT_NOT_FOUND) return c.NOT_FOUND elif result == c.DB_ERROR: applogger.error(lm.DB_ERROR) return c.INTERNAL_SERVER_ERROR return c.OK #################################################################### # Functions related to GCM: getProjectId, updateRegId, deleteRegId # #################################################################### def getProjectId(request): """ Returns the project linked to a given license. If the license_key is not found will return a 401, otherwise 500. """ license_key = request.QUERY_PARAMS.get('arg0') projectId = cdbw.getProjectIdByLicenseKey(license_key); if projectId == c.OBJECT_NOT_FOUND: return (c.UNAUTHORIZED,'') #License not found elif projectId == c.DB_ERROR: return (c.INTERNAL_SERVER_ERROR, '') return (c.OK, projectId) def saveRegId(request): """ Creates a new registration entry in cees database. 
""" data = request.DATA; validationResult = cv.CeesValidator().validate(data, c.GCM) if validationResult == c.VALID_SUCC: # Validation successful. Extracting data. macAddress = data.get('macAddress') regId = data.get('registrationID') device = cdbw.getDeviceByMacAddress(macAddress) if device == c.DB_ERROR: applogger.error(lm.DB_ERROR) return c.INTERNAL_SERVER_ERROR elif device == c.OBJECT_NOT_FOUND: applogger.warning(lm.UNKNOWN_DEVICE) return c.UNAUTHORIZED response = cdbw.saveRegistration(device, regId) if response == c.DB_ERROR: applogger.error(lm.DB_ERROR) return c.INTERNAL_SERVER_ERROR return c.OK return c.BAD_REQUEST def updateRegId(request): """ Updates the registration Id linked to a device. """ (status, token) = getToken(request) # Check the token in the Authentication header. if status != c.OK: return status device = cdbw.getDevice(token.id) if device == c.DB_ERROR: return c.INTERNAL_SERVER_ERROR registration = cdbw.getRegistrationId(device) if registration == c.OBJECT_NOT_FOUND: return c.NOT_FOUND elif registration == c.INTERNAL_SERVER_ERROR: return c.INTERNAL_SERVER_ERROR data = request.DATA regId = data.get('registrationID') result = cdbw.updateRegistrationId(registration.registration_id, regId) if result == c.OBJECT_NOT_FOUND: applogger.warning(lm.REGISTRATION_NOT_FOUND) return c.NOT_FOUND elif result == c.DB_ERROR: applogger.error(lm.DB_ERROR) return c.INTERNAL_SERVER_ERROR return c.OK
TecnoSalta/bg
refs/heads/master
mezzanine/pages/models.py
4
from __future__ import unicode_literals from future.builtins import filter, str try: from urllib.parse import urljoin except ImportError: # Python 2 from urlparse import urljoin from django.core.urlresolvers import resolve, reverse from django.db import models from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _, ugettext from mezzanine.conf import settings from mezzanine.core.models import Displayable, Orderable, RichText from mezzanine.pages.fields import MenusField from mezzanine.pages.managers import PageManager from mezzanine.utils.urls import path_to_slug, slugify class BasePage(Orderable, Displayable): """ Exists solely to store ``PageManager`` as the main manager. If it's defined on ``Page``, a concrete model, then each ``Page`` subclass loses the custom manager. """ objects = PageManager() class Meta: abstract = True @python_2_unicode_compatible class Page(BasePage): """ A page in the page tree. This is the base class that custom content types need to subclass. """ parent = models.ForeignKey("Page", blank=True, null=True, related_name="children") in_menus = MenusField(_("Show in menus"), blank=True, null=True) titles = models.CharField(editable=False, max_length=1000, null=True) content_model = models.CharField(editable=False, max_length=50, null=True) login_required = models.BooleanField(_("Login required"), default=False, help_text=_("If checked, only logged in users can view this page")) class Meta: verbose_name = _("Page") verbose_name_plural = _("Pages") ordering = ("titles",) order_with_respect_to = "parent" def __str__(self): return self.titles def get_absolute_url(self): """ URL for a page - for ``Link`` page types, simply return its slug since these don't have an actual URL pattern. Also handle the special case of the homepage being a page object. """ slug = self.slug if self.content_model == "link": # Ensure the URL is absolute. 
slug = urljoin('/', slug) return slug if slug == "/": return reverse("home") else: return reverse("page", kwargs={"slug": slug}) def save(self, *args, **kwargs): """ Create the titles field using the titles up the parent chain and set the initial value for ordering. """ if self.id is None: self.content_model = self._meta.object_name.lower() titles = [self.title] parent = self.parent while parent is not None: titles.insert(0, parent.title) parent = parent.parent self.titles = " / ".join(titles) super(Page, self).save(*args, **kwargs) def description_from_content(self): """ Override ``Displayable.description_from_content`` to load the content type subclass for when ``save`` is called directly on a ``Page`` instance, so that all fields defined on the subclass are available for generating the description. """ if self.__class__ == Page: content_model = self.get_content_model() if content_model: return content_model.description_from_content() return super(Page, self).description_from_content() def get_ascendants(self, for_user=None): """ Returns the ascendants for the page. Ascendants are cached in the ``_ascendants`` attribute, which is populated when the page is loaded via ``Page.objects.with_ascendants_for_slug``. """ if not self.parent_id: # No parents at all, bail out. return [] if not hasattr(self, "_ascendants"): # _ascendants has not been either page.get_ascendants or # Page.objects.assigned by with_ascendants_for_slug, so # run it to see if we can retrieve all parents in a single # query, which will occur if the slugs for each of the pages # have not been customised. if self.slug: kwargs = {"for_user": for_user} pages = Page.objects.with_ascendants_for_slug(self.slug, **kwargs) self._ascendants = pages[0]._ascendants else: self._ascendants = [] if not self._ascendants: # Page has a parent but with_ascendants_for_slug failed to # find them due to custom slugs, so retrieve the parents # recursively. 
child = self while child.parent_id is not None: self._ascendants.append(child.parent) child = child.parent return self._ascendants @classmethod def get_content_models(cls): """ Return all Page subclasses. """ is_content_model = lambda m: m is not Page and issubclass(m, Page) return list(filter(is_content_model, models.get_models())) def get_content_model(self): """ Provies a generic method of retrieving the instance of the custom content type's model for this page. """ return getattr(self, self.content_model, None) def get_slug(self): """ Recursively build the slug from the chain of parents. """ slug = slugify(self.title) if self.parent is not None: return "%s/%s" % (self.parent.slug, slug) return slug def set_slug(self, new_slug): """ Changes this page's slug, and all other pages whose slugs start with this page's slug. """ slug_prefix = "%s/" % self.slug for page in Page.objects.filter(slug__startswith=slug_prefix): if not page.overridden(): page.slug = new_slug + page.slug[len(self.slug):] page.save() self.slug = new_slug def set_parent(self, new_parent): """ Change the parent of this page, changing this page's slug to match the new parent if necessary. """ self_slug = self.slug old_parent_slug = self.parent.slug if self.parent else "" new_parent_slug = new_parent.slug if new_parent else "" # Make sure setting the new parent won't cause a cycle. parent = new_parent while parent is not None: if parent.pk == self.pk: raise AttributeError("You can't set a page or its child as" " a parent.") parent = parent.parent self.parent = new_parent self.save() if self_slug: if not old_parent_slug: self.set_slug("/".join((new_parent_slug, self.slug))) elif self.slug.startswith(old_parent_slug): new_slug = self.slug.replace(old_parent_slug, new_parent_slug, 1) self.set_slug(new_slug.strip("/")) def overridden(self): """ Returns ``True`` if the page's slug has an explicitly defined urlpattern and is therefore considered to be overridden. 
""" from mezzanine.pages.views import page page_url = reverse("page", kwargs={"slug": self.slug}) resolved_view = resolve(page_url)[0] return resolved_view != page def can_add(self, request): """ Dynamic ``add`` permission for content types to override. """ return self.slug != "/" def can_change(self, request): """ Dynamic ``change`` permission for content types to override. """ return True def can_delete(self, request): """ Dynamic ``delete`` permission for content types to override. """ return True def can_move(self, request, new_parent): """ Dynamic ``move`` permission for content types to override. Controls whether a given page move in the page tree is permitted. When the permission is denied, raises a ``PageMoveException`` with a single argument (message explaining the reason). """ pass def set_helpers(self, context): """ Called from the ``page_menu`` template tag and assigns a handful of properties based on the current page, that are used within the various types of menus. """ current_page = context["_current_page"] current_page_id = getattr(current_page, "id", None) current_parent_id = getattr(current_page, "parent_id", None) # Am I a child of the current page? self.is_current_child = self.parent_id == current_page_id self.is_child = self.is_current_child # Backward compatibility # Is my parent the same as the current page's? self.is_current_sibling = self.parent_id == current_parent_id # Am I the current page? try: request = context["request"] except KeyError: # No request context, most likely when tests are run. self.is_current = False else: self.is_current = self.slug == path_to_slug(request.path_info) # Is the current page me or any page up the parent chain? def is_c_or_a(page_id): parent_id = context.get("_parent_page_ids", {}).get(page_id) return self.id == page_id or (parent_id and is_c_or_a(parent_id)) self.is_current_or_ascendant = lambda: bool(is_c_or_a(current_page_id)) self.is_current_parent = self.id == current_parent_id # Am I a primary page? 
self.is_primary = self.parent_id is None # What's an ID I can use in HTML? self.html_id = self.slug.replace("/", "-") # Default branch level - gets assigned in the page_menu tag. self.branch_level = 0 def in_menu_template(self, template_name): if self.in_menus is not None: for i, l, t in settings.PAGE_MENU_TEMPLATES: if not str(i) in self.in_menus and t == template_name: return False return True def get_template_name(self): """ Subclasses can implement this to provide a template to use in ``mezzanine.pages.views.page``. """ return None class RichTextPage(Page, RichText): """ Implements the default type of page with a single Rich Text content field. """ class Meta: verbose_name = _("Rich text page") verbose_name_plural = _("Rich text pages") class Link(Page): """ A general content type for creating external links in the page menu. """ class Meta: verbose_name = _("Link") verbose_name_plural = _("Links") class PageMoveException(Exception): """ Raised by ``can_move()`` when the move permission is denied. Takes an optinal single argument: a message explaining the denial. """ def __init__(self, msg=None): self.msg = msg or ugettext("Illegal page move") def __str__(self): return self.msg __unicode__ = __str__
kate-v-stepanova/scilifelab
refs/heads/master
tests/bcbio/test_bcbio.py
4
import os import tempfile import shutil import unittest from ..data import data_files from scilifelab.bcbio.qc import RunInfoParser filedir = os.path.abspath(os.path.realpath(os.path.dirname(__file__))) RunInfo = data_files["RunInfo.xml"] class TestBcbioQC(unittest.TestCase): """Test for bcbio qc module""" def setUp(self): self.rootdir = tempfile.mkdtemp(prefix="test_bcbio_qc_") def tearDown(self): shutil.rmtree(self.rootdir) def test_parse_runinfo(self): temp = tempfile.TemporaryFile(mode="w+t") temp.write(RunInfo) temp.seek(0) rip = RunInfoParser() res = rip.parse(temp) self.assertEqual(res["Id"], "120924_SN0002_0003_CC003CCCXX") self.assertEqual(res["Flowcell"], "CC003CCCXX") self.assertEqual(res["Instrument"], "SN0002") self.assertEqual(res["Date"], "120924")
keedio/hue
refs/heads/master
desktop/core/ext-py/python-ldap-2.3.13/Tests/t_cext.py
40
import unittest, slapd import _ldap import logging reusable_server = None def get_reusable_server(): global reusable_server if reusable_server is None: reusable_server = slapd.Slapd() return reusable_server class TestLdapCExtension(unittest.TestCase): """Tests the LDAP C Extension module, _ldap. These tests apply only to the _ldap module and bypass the LDAPObject wrapper completely.""" timeout = 3 def _init_server(self, reuse_existing=True): global reusable_server """Sets self.server to a test LDAP server and self.base to its base""" if reuse_existing: server = get_reusable_server() else: server = slapd.Slapd() # private server #server.set_debug() # enables verbose messages server.start() # no effect if already started self.server = server self.base = server.get_dn_suffix() return server def _init(self, reuse_existing=True, bind=True): """Starts a server, and returns a LDAPObject bound to it""" server = self._init_server(reuse_existing) l = _ldap.initialize(server.get_url()) if bind: # Perform a simple bind l.set_option(_ldap.OPT_PROTOCOL_VERSION, _ldap.VERSION3) m = l.simple_bind(server.get_root_dn(), server.get_root_password()) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ONE, self.timeout) self.assertTrue(result, _ldap.RES_BIND) return l def assertNotNone(self, expr, msg=None): self.failIf(expr is None, msg or repr(expr)) def assertNone(self, expr, msg=None): self.failIf(expr is not None, msg or repr(expr)) # Test for the existence of a whole bunch of constants # that the C module is supposed to export def test_constants(self): self.assertEquals(_ldap.PORT, 389) self.assertEquals(_ldap.VERSION1, 1) self.assertEquals(_ldap.VERSION2, 2) self.assertEquals(_ldap.VERSION3, 3) # constants for result3() self.assertEquals(_ldap.RES_BIND, 0x61) self.assertEquals(_ldap.RES_SEARCH_ENTRY, 0x64) self.assertEquals(_ldap.RES_SEARCH_RESULT, 0x65) self.assertEquals(_ldap.RES_MODIFY, 0x67) self.assertEquals(_ldap.RES_ADD, 0x69) self.assertEquals(_ldap.RES_DELETE, 0x6b) 
self.assertEquals(_ldap.RES_MODRDN, 0x6d) self.assertEquals(_ldap.RES_COMPARE, 0x6f) self.assertEquals(_ldap.RES_SEARCH_REFERENCE, 0x73) # v3 self.assertEquals(_ldap.RES_EXTENDED, 0x78) # v3 #self.assertEquals(_ldap.RES_INTERMEDIATE, 0x79) # v3 self.assertNotNone(_ldap.RES_ANY) self.assertNotNone(_ldap.RES_UNSOLICITED) self.assertNotNone(_ldap.AUTH_NONE) self.assertNotNone(_ldap.AUTH_SIMPLE) self.assertNotNone(_ldap.SCOPE_BASE) self.assertNotNone(_ldap.SCOPE_ONELEVEL) self.assertNotNone(_ldap.SCOPE_SUBTREE) self.assertNotNone(_ldap.MOD_ADD) self.assertNotNone(_ldap.MOD_DELETE) self.assertNotNone(_ldap.MOD_REPLACE) self.assertNotNone(_ldap.MOD_INCREMENT) self.assertNotNone(_ldap.MOD_BVALUES) # for result3() self.assertNotNone(_ldap.MSG_ONE) self.assertNotNone(_ldap.MSG_ALL) self.assertNotNone(_ldap.MSG_RECEIVED) # for OPT_DEFEF self.assertNotNone(_ldap.DEREF_NEVER) self.assertNotNone(_ldap.DEREF_SEARCHING) self.assertNotNone(_ldap.DEREF_FINDING) self.assertNotNone(_ldap.DEREF_ALWAYS) # for OPT_SIZELIMIT, OPT_TIMELIMIT self.assertNotNone(_ldap.NO_LIMIT) # standard options self.assertNotNone(_ldap.OPT_API_INFO) self.assertNotNone(_ldap.OPT_DEREF) self.assertNotNone(_ldap.OPT_SIZELIMIT) self.assertNotNone(_ldap.OPT_TIMELIMIT) self.assertNotNone(_ldap.OPT_REFERRALS) self.assertNotNone(_ldap.OPT_RESTART) self.assertNotNone(_ldap.OPT_PROTOCOL_VERSION) self.assertNotNone(_ldap.OPT_SERVER_CONTROLS) self.assertNotNone(_ldap.OPT_CLIENT_CONTROLS) self.assertNotNone(_ldap.OPT_API_FEATURE_INFO) self.assertNotNone(_ldap.OPT_HOST_NAME) self.assertNotNone(_ldap.OPT_ERROR_NUMBER) # = OPT_RESULT_CODE self.assertNotNone(_ldap.OPT_ERROR_STRING) # = OPT_DIAGNOSITIC_MESSAGE self.assertNotNone(_ldap.OPT_MATCHED_DN) # OpenLDAP specific self.assertNotNone(_ldap.OPT_DEBUG_LEVEL) self.assertNotNone(_ldap.OPT_TIMEOUT) self.assertNotNone(_ldap.OPT_REFHOPLIMIT) self.assertNotNone(_ldap.OPT_NETWORK_TIMEOUT) self.assertNotNone(_ldap.OPT_URI) #self.assertNotNone(_ldap.OPT_REFERRAL_URLS) 
#self.assertNotNone(_ldap.OPT_SOCKBUF) #self.assertNotNone(_ldap.OPT_DEFBASE) #self.assertNotNone(_ldap.OPT_CONNECT_ASYNC) # str2dn() self.assertNotNone(_ldap.DN_FORMAT_LDAP) self.assertNotNone(_ldap.DN_FORMAT_LDAPV3) self.assertNotNone(_ldap.DN_FORMAT_LDAPV2) self.assertNotNone(_ldap.DN_FORMAT_DCE) self.assertNotNone(_ldap.DN_FORMAT_UFN) self.assertNotNone(_ldap.DN_FORMAT_AD_CANONICAL) self.assertNotNone(_ldap.DN_FORMAT_MASK) self.assertNotNone(_ldap.DN_PRETTY) self.assertNotNone(_ldap.DN_SKIP) self.assertNotNone(_ldap.DN_P_NOLEADTRAILSPACES) self.assertNotNone(_ldap.DN_P_NOSPACEAFTERRDN) self.assertNotNone(_ldap.DN_PEDANTIC) self.assertNotNone(_ldap.AVA_NULL) self.assertNotNone(_ldap.AVA_STRING) self.assertNotNone(_ldap.AVA_BINARY) self.assertNotNone(_ldap.AVA_NONPRINTABLE) # these two constants are pointless? XXX self.assertEquals(_ldap.LDAP_OPT_ON, 1) self.assertEquals(_ldap.LDAP_OPT_OFF, 0) # these constants useless after ldap_url_parse() was dropped XXX self.assertNotNone(_ldap.URL_ERR_BADSCOPE) self.assertNotNone(_ldap.URL_ERR_MEM) def test_simple_bind(self): l = self._init() def test_simple_anonymous_bind(self): l = self._init(bind=False) m = l.simple_bind("", "") result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertTrue(result, _ldap.RES_BIND) self.assertEquals(msgid, m) self.assertEquals(pmsg, []) self.assertEquals(ctrls, []) # see if we can get the rootdse while we're here m = l.search_ext("", _ldap.SCOPE_BASE, '(objectClass=*)') result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_SEARCH_RESULT) self.assertEquals(pmsg[0][0], "") # rootDSE has no dn self.assertEquals(msgid, m) self.assertTrue(pmsg[0][1].has_key('objectClass')) def test_unbind(self): l = self._init() m = l.unbind_ext() self.assertNone(m) # Second attempt to unbind should yield an exception try: l.unbind_ext() except _ldap.error: pass def test_search_ext_individual(self): l = self._init() m = 
l.search_ext(self.base, _ldap.SCOPE_SUBTREE, '(objectClass=dcObject)') result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ONE, self.timeout) # Expect to get just one object self.assertEquals(result, _ldap.RES_SEARCH_ENTRY) self.assertEquals(len(pmsg), 1) self.assertEquals(len(pmsg[0]), 2) self.assertEquals(pmsg[0][0], self.base) self.assertEquals(pmsg[0][0], self.base) self.assertTrue('dcObject' in pmsg[0][1]['objectClass']) self.assertTrue('organization' in pmsg[0][1]['objectClass']) self.assertEquals(msgid, m) self.assertEquals(ctrls, []) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ONE, self.timeout) self.assertEquals(result, _ldap.RES_SEARCH_RESULT) self.assertEquals(pmsg, []) self.assertEquals(msgid, m) self.assertEquals(ctrls, []) def test_abandon(self): l = self._init() m = l.search_ext(self.base, _ldap.SCOPE_SUBTREE, '(objectClass=*)') ret = l.abandon_ext(m) self.assertNone(ret) got_timeout = False try: r = l.result3(m, _ldap.MSG_ALL, 0.3) # (timeout /could/ be longer) except _ldap.TIMEOUT, e: got_timeout = True self.assertTrue(got_timeout) def test_search_ext_all(self): l = self._init() m = l.search_ext(self.base, _ldap.SCOPE_SUBTREE, '(objectClass=*)') result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) # Expect to get some objects self.assertEquals(result, _ldap.RES_SEARCH_RESULT) self.assertTrue(len(pmsg) >= 2) self.assertEquals(msgid, m) self.assertEquals(ctrls, []) def test_add(self): l = self._init() m = l.add_ext("cn=Foo," + self.base, [ ('objectClass','organizationalRole'), ('cn', 'Foo'), ('description', 'testing'), ]) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_ADD) self.assertEquals(pmsg, []) self.assertEquals(msgid, m) self.assertEquals(ctrls, []) # search for it back m = l.search_ext(self.base, _ldap.SCOPE_SUBTREE, '(cn=Foo)') result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) # Expect to get the objects self.assertEquals(result, 
_ldap.RES_SEARCH_RESULT) self.assertEquals(len(pmsg), 1) self.assertEquals(msgid, m) self.assertEquals(ctrls, []) self.assertEquals(pmsg[0], ('cn=Foo,'+self.base, { 'objectClass': ['organizationalRole'], 'cn': ['Foo'], 'description': ['testing'] })) def test_compare(self): l = self._init() # first, add an object with a field we can compare on dn = "cn=CompareTest," + self.base m = l.add_ext(dn, [ ('objectClass','person'), ('sn', 'CompareTest'), ('cn', 'CompareTest'), ('userPassword', 'the_password'), ]) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_ADD) # try a false compare m = l.compare_ext(dn, "userPassword", "bad_string") compared_false = False try: r = l.result3(m, _ldap.MSG_ALL, self.timeout) self.fail(repr(r)) except _ldap.COMPARE_FALSE: compared_false = True self.assertTrue(compared_false) # try a true compare m = l.compare_ext(dn, "userPassword", "the_password") compared_true = False try: r = l.result3(m, _ldap.MSG_ALL, self.timeout) self.fail(repr(r)) except _ldap.COMPARE_TRUE: compared_true = True self.assertTrue(compared_true) m = l.compare_ext(dn, "badAttribute", "ignoreme") raised_error = False try: r = l.result3(m, _ldap.MSG_ALL, self.timeout) self.fail(repr(r)) except _ldap.error: raised_error = True self.assertTrue(raised_error) def test_delete_no_such_object(self): l = self._init() # try deleting an object that doesn't exist not_found = False m = l.delete_ext("cn=DoesNotExist,"+self.base) try: r = l.result3(m, _ldap.MSG_ALL, self.timeout) self.fail(r) except _ldap.NO_SUCH_OBJECT: not_found = True self.assertTrue(not_found) def test_delete(self): l = self._init() # first, add an object we will delete dn = "cn=Deleteme,"+self.base m = l.add_ext(dn, [ ('objectClass','organizationalRole'), ('cn', 'Deleteme'), ]) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_ADD) m = l.delete_ext(dn) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, 
self.timeout) self.assertEquals(result, _ldap.RES_DELETE) self.assertEquals(msgid, m) self.assertEquals(pmsg, []) self.assertEquals(ctrls, []) def test_modify_no_such_object(self): l = self._init() # try deleting an object that doesn't exist not_found = False m = l.modify_ext("cn=DoesNotExist,"+self.base, [ (_ldap.MOD_ADD, 'description', ['blah']), ]) try: r = l.result3(m, _ldap.MSG_ALL, self.timeout) self.fail(r) except _ldap.NO_SUCH_OBJECT: not_found = True self.assertTrue(not_found) def DISABLED_test_modify_no_such_object_empty_attrs(self): # XXX ldif-backend for slapd appears broken??? l = self._init() # try deleting an object that doesn't exist m = l.modify_ext("cn=DoesNotExist,"+self.base, [ (_ldap.MOD_ADD, 'description', []), ]) self.assertTrue(isinstance(m, int)) r = l.result3(m, _ldap.MSG_ALL, self.timeout) # what should happen?? self.fail(r) def test_modify(self): l = self._init() # first, add an object we will delete dn = "cn=AddToMe,"+self.base m = l.add_ext(dn, [ ('objectClass','person'), ('cn', 'AddToMe'), ('sn', 'Modify'), ('description', 'a description'), ]) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_ADD) m = l.modify_ext(dn, [ (_ldap.MOD_ADD, 'description', ['b desc', 'c desc']), ]) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_MODIFY) self.assertEquals(pmsg, []) self.assertEquals(msgid, m) self.assertEquals(ctrls, []) # search for it back m = l.search_ext(self.base, _ldap.SCOPE_SUBTREE, '(cn=AddToMe)') result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) # Expect to get the objects self.assertEquals(result, _ldap.RES_SEARCH_RESULT) self.assertEquals(len(pmsg), 1) self.assertEquals(msgid, m) self.assertEquals(ctrls, []) self.assertEquals(pmsg[0][0], dn) d = list(pmsg[0][1]['description']) d.sort() self.assertEquals(d, ['a description', 'b desc', 'c desc']) def test_rename(self): l = self._init() dn = 
"cn=RenameMe,"+self.base m = l.add_ext(dn, [ ('objectClass','organizationalRole'), ('cn', 'RenameMe'), ]) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_ADD) # do the rename with same parent m = l.rename(dn, "cn=IAmRenamed") result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_MODRDN) self.assertEquals(msgid, m) self.assertEquals(pmsg, []) self.assertEquals(ctrls, []) # make sure the old one is gone m = l.search_ext(self.base, _ldap.SCOPE_SUBTREE, '(cn=RenameMe)') result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_SEARCH_RESULT) self.assertEquals(len(pmsg), 0) # expect no results self.assertEquals(msgid, m) self.assertEquals(ctrls, []) # check that the new one looks right dn2 = "cn=IAmRenamed,"+self.base m = l.search_ext(self.base, _ldap.SCOPE_SUBTREE, '(cn=IAmRenamed)') result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_SEARCH_RESULT) self.assertEquals(msgid, m) self.assertEquals(ctrls, []) self.assertEquals(len(pmsg), 1) self.assertEquals(pmsg[0][0], dn2) self.assertEquals(pmsg[0][1]['cn'], ['IAmRenamed']) # create the container containerDn = "ou=RenameContainer,"+self.base m = l.add_ext(containerDn, [ ('objectClass','organizationalUnit'), ('ou', 'RenameContainer'), ]) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_ADD) # WORKAROUND bug in slapd. (Without an existing child, # renames into a container object do not work for the ldif backend, # the renamed object appears to be deleted, not moved.) 
# see http://www.openldap.org/its/index.cgi/Software%20Bugs?id=5408 m = l.add_ext("cn=Bogus," + containerDn, [ ('objectClass','organizationalRole'), ('cn', 'Bogus'), ]) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_ADD) # now rename from dn2 to the conater dn3 = "cn=IAmRenamedAgain," + containerDn # Now try renaming dn2 across container (simultaneous name change) m = l.rename(dn2, "cn=IAmRenamedAgain", containerDn) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_MODRDN) self.assertEquals(msgid, m) self.assertEquals(pmsg, []) self.assertEquals(ctrls, []) # make sure dn2 is gone m = l.search_ext(self.base, _ldap.SCOPE_SUBTREE, '(cn=IAmRenamed)') result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_SEARCH_RESULT) self.assertEquals(len(pmsg), 0) # expect no results self.assertEquals(msgid, m) self.assertEquals(ctrls, []) m = l.search_ext(self.base, _ldap.SCOPE_SUBTREE, '(objectClass=*)') result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) # make sure dn3 is there m = l.search_ext(self.base, _ldap.SCOPE_SUBTREE, '(cn=IAmRenamedAgain)') result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_SEARCH_RESULT) self.assertEquals(msgid, m) self.assertEquals(ctrls, []) self.assertEquals(len(pmsg), 1) self.assertEquals(pmsg[0][0], dn3) self.assertEquals(pmsg[0][1]['cn'], ['IAmRenamedAgain']) def test_whoami(self): l = self._init() r = l.whoami_s() self.assertEquals("dn:" + self.server.get_root_dn(), r) def test_whoami_unbound(self): l = self._init(bind=False) l.set_option(_ldap.OPT_PROTOCOL_VERSION, _ldap.VERSION3) r = l.whoami_s() self.assertEquals("", r) def test_whoami_anonymous(self): l = self._init(bind=False) l.set_option(_ldap.OPT_PROTOCOL_VERSION, _ldap.VERSION3) # Anonymous bind m = l.simple_bind("", "") result,pmsg,msgid,ctrls = l.result3(m, 
_ldap.MSG_ALL, self.timeout) self.assertTrue(result, _ldap.RES_BIND) r = l.whoami_s() self.assertEquals("", r) def test_passwd(self): l = self._init() # first, create a user to change password on dn = "cn=PasswordTest," + self.base m = l.add_ext(dn, [ ('objectClass','person'), ('sn', 'PasswordTest'), ('cn', 'PasswordTest'), ('userPassword', 'initial'), ]) result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(result, _ldap.RES_ADD) # try changing password with a wrong old-pw m = l.passwd(dn, "bogus", "ignored") try: r = l.result3(m, _ldap.MSG_ALL, self.timeout) self.fail("expected UNWILLING_TO_PERFORM") except _ldap.UNWILLING_TO_PERFORM: pass # try changing password with a correct old-pw m = l.passwd(dn, "initial", "changed") result,pmsg,msgid,ctrls = l.result3(m, _ldap.MSG_ALL, self.timeout) self.assertEquals(msgid, m) self.assertEquals(pmsg, []) self.assertEquals(result, _ldap.RES_EXTENDED) self.assertEquals(ctrls, []) def test_options(self): oldval = _ldap.get_option(_ldap.OPT_PROTOCOL_VERSION) try: try: _ldap.set_option(_ldap.OPT_PROTOCOL_VERSION, "3") self.fail("expected string value to raise a type error") except TypeError: pass _ldap.set_option(_ldap.OPT_PROTOCOL_VERSION, _ldap.VERSION2) v = _ldap.get_option(_ldap.OPT_PROTOCOL_VERSION) self.assertEquals(v, _ldap.VERSION2) _ldap.set_option(_ldap.OPT_PROTOCOL_VERSION, _ldap.VERSION3) v = _ldap.get_option(_ldap.OPT_PROTOCOL_VERSION) self.assertEquals(v, _ldap.VERSION3) finally: _ldap.set_option(_ldap.OPT_PROTOCOL_VERSION, oldval) l = self._init() # Try changing some basic options and checking that they took effect l.set_option(_ldap.OPT_PROTOCOL_VERSION, _ldap.VERSION2) v = l.get_option(_ldap.OPT_PROTOCOL_VERSION) self.assertEquals(v, _ldap.VERSION2) l.set_option(_ldap.OPT_PROTOCOL_VERSION, _ldap.VERSION3) v = l.get_option(_ldap.OPT_PROTOCOL_VERSION) self.assertEquals(v, _ldap.VERSION3) # Try setting options that will yield a known error. 
try: _ldap.get_option(_ldap.OPT_MATCHED_DN) self.fail("expected ValueError") except ValueError: pass def _require_attr(self, obj, attrname): """Returns true if the attribute exists on the object. This is to allow some tests to be optional, because _ldap is compiled with different properties depending on the underlying C library. This could me made to thrown an exception if you want the tests to be strict.""" if hasattr(obj, attrname): return True #self.fail("required attribute '%s' missing" % attrname) return False def test_sasl(self): l = self._init() if not self._require_attr(l, 'sasl_interactive_bind_s'): # HAVE_SASL return # TODO def test_tls(self): l = self._init() if not self._require_attr(l, 'start_tls_s'): # HAVE_TLS return # TODO def test_cancel(self): l = self._init() if not self._require_attr(l, 'cancel'): # FEATURE_CANCEL return def test_str2dn(self): pass if __name__ == '__main__': unittest.main()
sisap-ics/sisaptools
refs/heads/master
sisaptools/aes.py
1
# -*- coding: utf8 -*- """ Encriptació i desencriptació amb AES. La clau es guarda en un fitxer. """ import base64 import hashlib import os from Crypto.Cipher import AES from Crypto import Random from .constants import APP_CHARSET class AESCipher(object): """ Classe principal. """ def __init__(self): """Captura la clau del fitxer (pot ser de qualsevol longitut).""" if 'AES_KEY' in os.environ: pth = os.environ['AES_KEY'] key = open(pth).read() else: key = 'testkey' self.key = hashlib.sha256(AESCipher.str_to_bytes(key)).digest() self.bs = 32 @staticmethod def str_to_bytes(data): """Converteix unicode a bytes.""" u_type = type(b''.decode(APP_CHARSET)) if isinstance(data, u_type): return data.encode(APP_CHARSET) return data def _pad(self, s): return s + (self.bs - len(s) % self.bs) * \ AESCipher.str_to_bytes(chr(self.bs - len(s) % self.bs)) @staticmethod def _unpad(s): return s[:-ord(s[len(s)-1:])] def encrypt(self, raw): """ Funció per encriptar. Pot rebre unicode o bytes, i retorna unicode. """ raw = self._pad(AESCipher.str_to_bytes(raw)) iv = Random.new().read(AES.block_size) cipher = AES.new(self.key, AES.MODE_CBC, iv) return base64.b64encode(iv + cipher.encrypt(raw)).decode(APP_CHARSET) def decrypt(self, enc): """ Funció per desencriptar. Rep unicode (el que retorna encrypt) i retorna unicode. """ enc = base64.b64decode(enc) iv = enc[:AES.block_size] cipher = AES.new(self.key, AES.MODE_CBC, iv) decrypted = cipher.decrypt(enc[AES.block_size:]) return self._unpad(decrypted).decode(APP_CHARSET)
acourtney2015/boto
refs/heads/develop
boto/cacerts/__init__.py
260
# Copyright 2010 Google Inc. # All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. #
DavidMS51/TFWordclock
refs/heads/master
pixtest1.py
1
from luma.core.interface.serial import spi, noop from luma.core.render import canvas from luma.led_matrix.device import max7219 from time import sleep serial = spi(port=0, device=0, gpio=noop()) device = max7219(serial) #font = ImageFont.truetype("examples/pixelmix.ttf", 8) #with canvas(device) as draw: # draw.rectangle(device.bounding_box, outline="white", fill="black") #sleep(2) import random from PIL import Image image = Image.new('1', (8, 8)) while True: x = random.randint(0,7) y = random.randint(0,7) image.putpixel((x, y), 1) device.display(image) sleep(.05) image.putpixel((x, y), 0) device.display(image)
andela-ifageyinbo/django
refs/heads/master
tests/utils_tests/test_numberformat.py
307
# -*- encoding: utf-8 -*- from __future__ import unicode_literals from decimal import Decimal from sys import float_info from unittest import TestCase from django.utils.numberformat import format as nformat class TestNumberFormat(TestCase): def test_format_number(self): self.assertEqual(nformat(1234, '.'), '1234') self.assertEqual(nformat(1234.2, '.'), '1234.2') self.assertEqual(nformat(1234, '.', decimal_pos=2), '1234.00') self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=','), '1234') self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34') self.assertEqual(nformat(-1234.33, '.', decimal_pos=1), '-1234.3') def test_format_string(self): self.assertEqual(nformat('1234', '.'), '1234') self.assertEqual(nformat('1234.2', '.'), '1234.2') self.assertEqual(nformat('1234', '.', decimal_pos=2), '1234.00') self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=','), '1234') self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34') self.assertEqual(nformat('-1234.33', '.', decimal_pos=1), '-1234.3') self.assertEqual(nformat('10000', '.', grouping=3, thousand_sep='comma', force_grouping=True), '10comma000') def test_large_number(self): most_max = ('{}179769313486231570814527423731704356798070567525844996' '598917476803157260780028538760589558632766878171540458953' '514382464234321326889464182768467546703537516986049910576' '551282076245490090389328944075868508455133942304583236903' '222948165808559332123348274797826204144723168738177180919' '29988125040402618412485836{}') most_max2 = ('{}35953862697246314162905484746340871359614113505168999' '31978349536063145215600570775211791172655337563430809179' '07028764928468642653778928365536935093407075033972099821' '15310256415249098018077865788815173701691026788460916647' '38064458963316171186642466965495956524082894463374763543' '61838599762500808052368249716736') int_max = int(float_info.max) self.assertEqual(nformat(int_max, '.'), 
most_max.format('', '8')) self.assertEqual(nformat(int_max + 1, '.'), most_max.format('', '9')) self.assertEqual(nformat(int_max * 2, '.'), most_max2.format('')) self.assertEqual(nformat(0 - int_max, '.'), most_max.format('-', '8')) self.assertEqual(nformat(-1 - int_max, '.'), most_max.format('-', '9')) self.assertEqual(nformat(-2 * int_max, '.'), most_max2.format('-')) def test_decimal_numbers(self): self.assertEqual(nformat(Decimal('1234'), '.'), '1234') self.assertEqual(nformat(Decimal('1234.2'), '.'), '1234.2') self.assertEqual(nformat(Decimal('1234'), '.', decimal_pos=2), '1234.00') self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=','), '1234') self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34') self.assertEqual(nformat(Decimal('-1234.33'), '.', decimal_pos=1), '-1234.3') self.assertEqual(nformat(Decimal('0.00000001'), '.', decimal_pos=8), '0.00000001') def test_decimal_subclass(self): class EuroDecimal(Decimal): """ Wrapper for Decimal which prefixes each amount with the € symbol. """ def __format__(self, specifier, **kwargs): amount = super(EuroDecimal, self).__format__(specifier, **kwargs) return '€ {}'.format(amount) price = EuroDecimal('1.23') self.assertEqual(nformat(price, ','), '€ 1,23')
uweschmitt/emzed
refs/heads/master
libms/tools.py
1
#encoding: latin-1 ## start of http://code.activestate.com/recipes/523004/ }}} import gc, sys from types import FrameType def print_cycles(objects=gc.garbage, outstream=sys.stdout, show_progress=False): """ objects: A list of objects to find cycles in. It is often useful to pass in gc.garbage to find the cycles that are preventing some objects from being garbage collected. outstream: The stream for output. show_progress: If True, print the number of objects reached as they are found. """ def print_path(path): for i, step in enumerate(path): # next "wraps around" next = path[(i + 1) % len(path)] outstream.write(" %s -- " % str(type(step))) if isinstance(step, dict): for key, val in step.items(): if val is next: outstream.write("[%s]" % repr(key)) break if key is next: outstream.write("[key] = %s" % repr(val)) break elif isinstance(step, list): outstream.write("[%d]" % step.index(next)) elif isinstance(step, tuple): outstream.write("[%d]" % list(step).index(next)) else: outstream.write(repr(step)) outstream.write(" ->\n") outstream.write("\n") def recurse(obj, start, all, current_path): if show_progress: outstream.write("%d\r" % len(all)) all[id(obj)] = None referents = gc.get_referents(obj) for referent in referents: # If we've found our way back to the start, this is # a cycle, so print it out if referent is start: print_path(current_path) # Don't go back through the original list of objects, or # through temporary references to the object, since those # are just an artifact of the cycle detector itself. elif referent is objects or isinstance(referent, FrameType): continue # We haven't seen this object before, so recurse elif id(referent) not in all: recurse(referent, start, all, current_path + [obj]) for obj in objects: outstream.write("Examining: %r\n" % obj) recurse(obj, obj, { }, []) ## end of http://code.activestate.com/recipes/523004/ }}}
LittleSmaug/summercamp2k17
refs/heads/master
src/physics/body.py
1
from .vector import Vector class Body(object): def __init__(self, m, p=Vector(), v=Vector()): assert m > 0 self.m = m self.forces = [] self.p = p self.v = v self.a = Vector() self.space = None def apply_force(self, force): self.forces.append(force) def reset_forces(self): self.forces = [] def update(self, dt): total_force = sum(self.forces, Vector()) a = total_force / self.m d = self.v * dt + 1.0 / 2 * a * dt * dt self.v += a * dt self.p += d self.reset_forces() def __str__(self): return "Body(p=" + str(self.p) + ", v=" + str(self.v) + ")"
Maccimo/intellij-community
refs/heads/master
python/testData/paramInfo/NestedMultiArg.py
83
# py 2.x only def foo(a, (b, c), d): pass foo(<arg1>1, <arg23>range(2), <arg4>4)
kdwink/intellij-community
refs/heads/master
python/helpers/pydev/tests_runfiles/test_pydevd_property.py
56
''' Created on Aug 22, 2011 @author: hussain.bohra @author: fabioz ''' import os import sys import unittest #======================================================================================================================= # Test #======================================================================================================================= class Test(unittest.TestCase): """Test cases to validate custom property implementation in pydevd """ def setUp(self, nused=None): self.tempdir = os.path.join(os.path.dirname(os.path.dirname(__file__))) sys.path.insert(0, self.tempdir) import pydevd_traceproperty self.old = pydevd_traceproperty.replace_builtin_property() def tearDown(self, unused=None): import pydevd_traceproperty pydevd_traceproperty.replace_builtin_property(self.old) sys.path.remove(self.tempdir) def testProperty(self): """Test case to validate custom property """ import pydevd_traceproperty class TestProperty(object): def __init__(self): self._get = 0 self._set = 0 self._del = 0 def get_name(self): self._get += 1 return self.__name def set_name(self, value): self._set += 1 self.__name = value def del_name(self): self._del += 1 del self.__name name = property(get_name, set_name, del_name, "name's docstring") self.assertEqual(name.__class__, pydevd_traceproperty.DebugProperty) testObj = TestProperty() self._check(testObj) def testProperty2(self): """Test case to validate custom property """ class TestProperty(object): def __init__(self): self._get = 0 self._set = 0 self._del = 0 def name(self): self._get += 1 return self.__name name = property(name) def set_name(self, value): self._set += 1 self.__name = value name.setter(set_name) def del_name(self): self._del += 1 del self.__name name.deleter(del_name) testObj = TestProperty() self._check(testObj) def testProperty3(self): """Test case to validate custom property """ class TestProperty(object): def __init__(self): self._name = 'foo' def name(self): return self._name name = property(name) testObj 
= TestProperty() self.assertRaises(AttributeError, setattr, testObj, 'name', 'bar') self.assertRaises(AttributeError, delattr, testObj, 'name') def _check(self, testObj): testObj.name = "Custom" self.assertEqual(1, testObj._set) self.assertEqual(testObj.name, "Custom") self.assertEqual(1, testObj._get) self.assert_(hasattr(testObj, 'name')) del testObj.name self.assertEqual(1, testObj._del) self.assert_(not hasattr(testObj, 'name')) testObj.name = "Custom2" self.assertEqual(testObj.name, "Custom2") #======================================================================================================================= # main #======================================================================================================================= if __name__ == '__main__': #this is so that we can run it from the jython tests -- because we don't actually have an __main__ module #(so, it won't try importing the __main__ module) unittest.TextTestRunner().run(unittest.makeSuite(Test))
lmprice/ansible
refs/heads/devel
lib/ansible/plugins/lookup/credstash.py
96
# (c) 2015, Ensighten <infra@ensighten.com> # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ lookup: credstash version_added: "2.0" short_description: retrieve secrets from Credstash on AWS requirements: - credstash (python library) description: - "Credstash is a small utility for managing secrets using AWS's KMS and DynamoDB: https://github.com/fugue/credstash" options: _terms: description: term or list of terms to lookup in the credit store type: list required: True table: description: name of the credstash table to query default: 'credential-store' required: True version: description: Credstash version region: description: AWS region profile_name: description: AWS profile to use for authentication env: - name: AWS_PROFILE aws_access_key_id: description: AWS access key ID env: - name: AWS_ACCESS_KEY_ID aws_secret_access_key: description: AWS access key env: - name: AWS_SECRET_ACCESS_KEY aws_session_token: description: AWS session token env: - name: AWS_SESSION_TOKEN """ EXAMPLES = """ - name: first use credstash to store your secrets shell: credstash put my-github-password secure123 - name: "Test credstash lookup plugin -- get my github password" debug: msg="Credstash lookup! {{ lookup('credstash', 'my-github-password') }}" - name: "Test credstash lookup plugin -- get my other password from us-west-1" debug: msg="Credstash lookup! {{ lookup('credstash', 'my-other-password', region='us-west-1') }}" - name: "Test credstash lookup plugin -- get the company's github password" debug: msg="Credstash lookup! 
{{ lookup('credstash', 'company-github-password', table='company-passwords') }}" - name: Example play using the 'context' feature hosts: localhost vars: context: app: my_app environment: production tasks: - name: "Test credstash lookup plugin -- get the password with a context passed as a variable" debug: msg="{{ lookup('credstash', 'some-password', context=context) }}" - name: "Test credstash lookup plugin -- get the password with a context defined here" debug: msg="{{ lookup('credstash', 'some-password', context=dict(app='my_app', environment='production')) }}" """ RETURN = """ _raw: description: - value(s) stored in Credstash """ import os from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase CREDSTASH_INSTALLED = False try: import credstash CREDSTASH_INSTALLED = True except ImportError: CREDSTASH_INSTALLED = False class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): if not CREDSTASH_INSTALLED: raise AnsibleError('The credstash lookup plugin requires credstash to be installed.') ret = [] for term in terms: try: version = kwargs.pop('version', '') region = kwargs.pop('region', None) table = kwargs.pop('table', 'credential-store') profile_name = kwargs.pop('profile_name', os.getenv('AWS_PROFILE', None)) aws_access_key_id = kwargs.pop('aws_access_key_id', os.getenv('AWS_ACCESS_KEY_ID', None)) aws_secret_access_key = kwargs.pop('aws_secret_access_key', os.getenv('AWS_SECRET_ACCESS_KEY', None)) aws_session_token = kwargs.pop('aws_session_token', os.getenv('AWS_SESSION_TOKEN', None)) kwargs_pass = {'profile_name': profile_name, 'aws_access_key_id': aws_access_key_id, 'aws_secret_access_key': aws_secret_access_key, 'aws_session_token': aws_session_token} val = credstash.getSecret(term, version, region, table, context=kwargs, **kwargs_pass) except credstash.ItemNotFound: raise AnsibleError('Key {0} not found'.format(term)) except Exception as e: raise AnsibleError('Encountered exception while fetching {0}: 
{1}'.format(term, e.message)) ret.append(val) return ret
josircg/raizcidadanista
refs/heads/master
raizcidadanista/cms/urls.py
1
# coding: utf-8
# URL configuration for the CMS app.
# NOTE: legacy Django 1.x style — ``patterns()`` and string view paths were
# removed in Django 1.10; keep this style while the project stays on 1.x.
from django.conf.urls import patterns, include, url

from views import *
from forms import CustomPasswordResetForm
from cms.sitemap import sitemaps


urlpatterns = patterns('',
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^pesquisa/$', SearchView.as_view(), name='search'),
    url(r'^circulos/$', CirculosView.as_view(), name='circulos'),
    url(r'^coligacoes/$', ColigacoesView.as_view(), name='coligacoes'),
    url(r'^articuladores/$', ArticuladoresView.as_view(), name='articuladores'),
    url(r'^mapa/$', MapaView.as_view(), name='mapa'),
    url(r'^gts/$', GTsView.as_view(), name='gts'),
    # NOTE(review): duplicate route name 'gts' — reverse('gts') can only
    # resolve to one of these two patterns. Kept unchanged for backward
    # compatibility; consider renaming this one to 'circulos_tematicos'.
    url(r'^circulos-tematicos/$', CirculosTematicos.as_view(), name='gts'),
    url(r'^meta/(?P<pk>\d+)/$', MetaView.as_view(), name='meta'),
    url(r'^meta/(?P<pk>\d+)/depositos/$', MetaDepositosView.as_view(), name='meta_depositos'),
    url(r'^contato/?$', ContatoView.as_view(), name='contato'),
    url(r'^section/(?P<slug>[-_\w]+)/$', SectionDetailView.as_view(), name='section'),
    url(r'^download/(?P<file_uuid>[-_\w]+)/$', FileDownloadView.as_view(), name='download'),
    # Both link-conversion routes share one view and one name; the view
    # distinguishes them by the captured kwarg (article_slug vs section_slug).
    url(r'^link/a/(?P<article_slug>[-_\w]+)/$', LinkConversionView.as_view(), name='link'),
    url(r'^link/s/(?P<section_slug>[-_\w]+)/$', LinkConversionView.as_view(), name='link'),

    url(r'^sitemap\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}, name="sitemap"),
    url(r'^robots\.txt$', RobotsView.as_view(), name="robots"),

    # Authentication.
    url(r'^login/$', LoginView.as_view(), name='cms_login'),
    url(r'^login/facebook/$', LoginFacebookView.as_view(), name='cms_login_facebook'),
    url(r'^login/twitter/$', LoginTwitterView.as_view(), name='cms_login_twitter'),
    url(r'^logout/$', 'django.contrib.auth.views.logout',
        kwargs={'template_name': 'auth/logout.html',}, name='cms_logout'),
    url(r'^password_change/$', 'django.contrib.auth.views.password_change',
        kwargs={'template_name': 'auth/password_change_form.html'}, name='cms_password_change'),
    url(r'^password_change/done/$', 'django.contrib.auth.views.password_change_done',
        kwargs={'template_name': 'auth/password_change_done.html',}, name='cms_password_change_done'),
    url(r'^password_reset/$', 'django.contrib.auth.views.password_reset',
        kwargs={
            'template_name': 'auth/password_reset_form.html',
            'email_template_name': 'auth/password_reset_email.html',
            'password_reset_form': CustomPasswordResetForm,
        }, name='cms_password_reset'),
    url(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done',
        kwargs={'template_name': 'auth/password_reset_done.html',}, name='cms_password_reset_done'),
    url(r'^reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        'django.contrib.auth.views.password_reset_confirm',
        kwargs={'template_name': 'auth/password_reset_confirm.html',}, name='cms_password_reset_confirm'),
    url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete',
        kwargs={'template_name': 'auth/password_reset_complete.html',}, name='cms_password_reset_complete'),

    # Proposal URL shortener ("Encurtador da Proposta").
    url(r'^proposta/(?P<idb36>[0-9A-Za-z]{1,13})/$', PropostaShortView.as_view(), name='proposta_short'),

    # Catch-all slug route: must stay LAST so it does not shadow the routes above.
    url(r'^(?P<slug>[-_\w]+)/?$', ArticleDetailView.as_view(), name='article'),
)
houlixin/BBB-TISDK
refs/heads/master
linux-devkit/sysroots/i686-arago-linux/usr/lib/python2.7/_weakrefset.py
135
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.

from _weakref import ref

__all__ = ['WeakSet']


class _IterationGuard(object):
    # This context manager registers itself in the current iterators of the
    # weak container, such as to delay all removals until the context manager
    # exits.
    # This technique should be relatively thread-safe (since sets are).

    def __init__(self, weakcontainer):
        # Hold the container weakly: don't create reference cycles.
        self.weakcontainer = ref(weakcontainer)

    def __enter__(self):
        w = self.weakcontainer()
        if w is not None:
            w._iterating.add(self)
        return self

    def __exit__(self, e, t, b):
        w = self.weakcontainer()
        if w is not None:
            s = w._iterating
            s.remove(self)
            if not s:
                # Last active iterator gone: flush removals deferred
                # while iteration was in progress.
                w._commit_removals()


class WeakSet(object):
    """A set that holds weak references to its elements.

    An element disappears from the set once there are no remaining
    strong references to it.  Removals triggered while the set is
    being iterated are deferred (see ``_IterationGuard``) so that
    iteration never mutates ``self.data`` mid-walk.
    """

    def __init__(self, data=None):
        self.data = set()  # set of weak references to the elements

        def _remove(item, selfref=ref(self)):
            # Weakref callback: item's referent died.  Defer the discard
            # if an iteration is active, otherwise drop it immediately.
            self = selfref()
            if self is not None:
                if self._iterating:
                    self._pending_removals.append(item)
                else:
                    self.data.discard(item)
        self._remove = _remove
        # A list of keys to be removed
        self._pending_removals = []
        self._iterating = set()
        if data is not None:
            self.update(data)

    def _commit_removals(self):
        # Drain the deferred-removal list; safe because no iteration is
        # active when this runs.
        l = self._pending_removals
        discard = self.data.discard
        while l:
            discard(l.pop())

    def __iter__(self):
        with _IterationGuard(self):
            for itemref in self.data:
                item = itemref()
                if item is not None:
                    yield item

    def __len__(self):
        # Count only live references; dead ones may linger in self.data.
        return sum(x() is not None for x in self.data)

    def __contains__(self, item):
        try:
            wr = ref(item)
        except TypeError:
            # Non-weakrefable objects can never be members.
            return False
        return wr in self.data

    def __reduce__(self):
        return (self.__class__, (list(self),),
                getattr(self, '__dict__', None))

    __hash__ = None  # WeakSet is mutable, hence unhashable

    def add(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.add(ref(item, self._remove))

    def clear(self):
        if self._pending_removals:
            self._commit_removals()
        self.data.clear()

    def copy(self):
        return self.__class__(self)

    def pop(self):
        if self._pending_removals:
            self._commit_removals()
        while True:
            try:
                itemref = self.data.pop()
            except KeyError:
                raise KeyError('pop from empty WeakSet')
            item = itemref()
            if item is not None:
                # Skip references whose referent already died.
                return item

    def remove(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.remove(ref(item))

    def discard(self, item):
        if self._pending_removals:
            self._commit_removals()
        self.data.discard(ref(item))

    def update(self, other):
        if self._pending_removals:
            self._commit_removals()
        if isinstance(other, self.__class__):
            self.data.update(other.data)
        else:
            for element in other:
                self.add(element)

    def __ior__(self, other):
        self.update(other)
        return self

    # Helper functions for simple delegating methods.
    def _apply(self, other, method):
        if not isinstance(other, self.__class__):
            other = self.__class__(other)
        newdata = method(other.data)
        newset = self.__class__()
        newset.data = newdata
        return newset

    def difference(self, other):
        return self._apply(other, self.data.difference)
    __sub__ = difference

    def difference_update(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)

    def __isub__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.difference_update(ref(item) for item in other)
        return self

    def intersection(self, other):
        return self._apply(other, self.data.intersection)
    __and__ = intersection

    def intersection_update(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)

    def __iand__(self, other):
        if self._pending_removals:
            self._commit_removals()
        self.data.intersection_update(ref(item) for item in other)
        return self

    def issubset(self, other):
        return self.data.issubset(ref(item) for item in other)
    __lt__ = issubset

    def __le__(self, other):
        return self.data <= set(ref(item) for item in other)

    def issuperset(self, other):
        return self.data.issuperset(ref(item) for item in other)
    __gt__ = issuperset

    def __ge__(self, other):
        return self.data >= set(ref(item) for item in other)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.data == set(ref(item) for item in other)

    def symmetric_difference(self, other):
        return self._apply(other, self.data.symmetric_difference)
    __xor__ = symmetric_difference

    def symmetric_difference_update(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item) for item in other)

    def __ixor__(self, other):
        if self._pending_removals:
            self._commit_removals()
        if self is other:
            self.data.clear()
        else:
            self.data.symmetric_difference_update(ref(item) for item in other)
        return self

    def union(self, other):
        return self._apply(other, self.data.union)
    __or__ = union

    def isdisjoint(self, other):
        return len(self.intersection(other)) == 0
4144414D/pytruecrypt
refs/heads/master
examples/keysearch.py
1
""" keyseaerch helps locate data by attempting to decrypt sectors with supplied keys and seeing how random the resutls are. Encryption modes can be assigned long or short hand where: aes = a twofish = t serpent = s For example 'aes-twofish' can be shortened to 'at' and aes-twofish-serpent to ats. GitHub: https://github.com/4144414D/pytruecrypt Usage: image <source> [-mMODE -aKEY -tKEY -sKEY -oNUM] image --help Options: --help Show this screen -a key, --aes key AES Key -t key, --twofish key Twofish Key -s key, --serpent key Serpent Key -o num, --offset num Decrypt as offset number num [default: 256] -m MODE, --mode mode Encryption mode to use [default: aes] """ from pytruecrypt import * from docopt import docopt import os import binascii import time from util import * import numpy as np import math def checkkey(mode,key,name): if key != None: #check if key is required but not supplied if (name not in mode): print "ERROR " + name + " key is not required for the selected encryption mode" #check key is correct length elif (len(key) == 128): try: #try to convert hex to bin return binascii.unhexlify(key) #good outcome except TypeError: print "ERROR " + name + " key invalid and contains non hex characters [0-f]" #if key is incorrect length give an error elif (len(key) != 128): print "ERROR " + name + " key is not the correct length" else: #check if key is required but not supplied if (name in mode): print "ERROR " + name + " key is required for the selected encryption mode but not supplied" return False #everything else is a fail #jaradc - https://gist.github.com/jaradc/eeddf20932c0347928d0da5a09298147 def entropy(data): """ Computes entropy of label distribution. """ labels = list(data) n_labels = 512.0 value,counts = np.unique(labels, return_counts=True) probs = counts / n_labels n_classes = np.count_nonzero(probs) ent = 0. 
# Compute entropy for i in probs: ent -= i * math.log(i, 2) return ent if __name__ == '__main__': arguments = docopt(__doc__) #check crypto options if arguments['--mode'] in ["a","at","ats","s","sa","sta","t","ts"]: #viable short hand mode chosen, convert to long hand, this is dumb arguments['--mode'] = arguments['--mode'].replace('a','1').replace('t','2').replace('s','3') arguments['--mode'] = arguments['--mode'].replace('1','aes-').replace('2','twofish-').replace('3','serpent-')[:-1] elif arguments['--mode'] not in ["aes","aes-twofish","aes-twofish-serpent","serpent","serpent-aes","serpent-twofish-aes","twofish","twofish-serpent"]: print "ERROR Please choose a viable crypto mode:" for line in ["aes","aes-twofish","aes-twofish-serpent","serpent","serpent-aes","serpent-twofish-aes","twofish","twofish-serpent"]: print "\t"+line exit(1) #split long hand into list arguments['--mode'] = arguments['--mode'].split('-') #check keys are viable if required key_count = 0 aes_key = None twofish_key = None serpent_key = None aes_key = checkkey(arguments['--mode'],arguments['--aes'],'aes') twofish_key = checkkey(arguments['--mode'],arguments['--twofish'],'twofish') serpent_key = checkkey(arguments['--mode'],arguments['--serpent'],'serpent') if not (aes_key or twofish_key or serpent_key): exit(1) #check source file exists if not os.path.isfile(arguments['<source>']): print "ERROR", print arguments['<source>'], print "does not exist" exit(1) #use PyTruecrypt to open source tc = PyTruecrypt(arguments['<source>'], encryption=arguments['--mode']) #open source with keys tc.open_with_key(aes_key,twofish_key,serpent_key) offset = int(arguments['--offset']) tick = 0 for x in range(0,tc.size,512): tc.fd.seek(x) cipher = tc.fd.read(512) sector = x/512 plain = tc._decrypt_sector(offset,cipher) ent = entropy(plain) if ent < 7: print print "Possible result at sector: {}".format(str(sector)) print hexdump(plain) pause = raw_input("Press enter to contiune...") tick += 1 if tick == 128: tick = 
0 percentage = (float(100) / tc.size) * x print "\rSearching... {}%".format(percentage), print "\rSearching... 100% "
rsnakamura/iperflexer
refs/heads/master
tests/testpipe.py
1
from unittest import TestCase from mock import MagicMock from iperflexer.iperfparser import IperfParser from iperflexer.sumparser import SumParser FRAGMENT = """ ------------------------------------------------------------ Client connecting to 192.168.20.99, TCP port 5001 TCP window size: 16.0 KByte (default) ------------------------------------------------------------ [ 6] local 192.168.20.50 port 57069 connected with 192.168.20.99 port 5001 [ 4] local 192.168.20.50 port 57066 connected with 192.168.20.99 port 5001 [ 5] local 192.168.20.50 port 57067 connected with 192.168.20.99 port 5001 [ 3] local 192.168.20.50 port 57068 connected with 192.168.20.99 port 5001 [ ID] Interval Transfer Bandwidth [ 3] 0.0- 1.0 sec 896 KBytes 7.34 Mbits/sec [ 6] 0.0- 1.0 sec 768 KBytes 6.29 Mbits/sec [ 4] 0.0- 1.0 sec 768 KBytes 6.29 Mbits/sec [ 5] 0.0- 1.0 sec 768 KBytes 6.29 Mbits/sec [SUM] 0.0- 1.0 sec 3.12 MBytes 26.2 Mbits/sec [ 3] 1.0- 2.0 sec 768 KBytes 6.29 Mbits/sec [ 4] 1.0- 2.0 sec 768 KBytes 6.29 Mbits/sec """.split("\n") CSV_FRAGMENT = """ 20120912102944,192.168.20.50,56843,192.168.20.99,5001,4,0.0-1.0,786432,6291456 20120912102945,192.168.20.50,56844,192.168.20.99,5001,6,0.0-1.0,655360,5242880 20120912102945,192.168.20.50,56842,192.168.20.99,5001,5,0.0-1.0,655360,5242880 20120912102945,192.168.20.50,56841,192.168.20.99,5001,3,0.0-1.0,655360,5242880 20120912102945,192.168.20.50,0,192.168.20.99,5001,-1,0.0-1.0,2752512,22020096 20120912102946,192.168.20.50,56841,192.168.20.99,5001,3,1.0-2.0,655360,5242880 20120912102946,192.168.20.50,56844,192.168.20.99,5001,6,1.0-2.0,917504,7340032 """.split("\n") class TestPipe(TestCase): def setUp(self): self.parser = IperfParser() return def test_human_pipe(self): target = MagicMock() pipe = self.parser.pipe(target) for line in FRAGMENT: pipe.send(line) expected = 26.2 name, args, kwargs = target.send.mock_calls[0] actual = float(args[0]) self.assertAlmostEqual(expected, actual) return def test_csv(self): target = MagicMock() pipe = 
self.parser.pipe(target) for line in CSV_FRAGMENT: pipe.send(line) expected = 22.020096 name, args, kwargs = target.send.mock_calls[0] actual = float(args[0]) self.assertAlmostEqual(expected, actual) return # end class TestPipe class TestSumParser(TestCase): def setUp(self): self.target = MagicMock() self.parser = SumParser() self.pipe = self.parser.pipe(self.target) return def test_human_pipe(self): self.parser.reset() target = MagicMock() pipe = self.parser.pipe(target) for line in FRAGMENT: pipe.send(line) expected = 26.2 print target.send.mock_calls name, args, kwargs = target.send.mock_calls[0] actual = float(args[0]) self.assertAlmostEqual(expected, actual) return def test_csv(self): for line in CSV_FRAGMENT: self.pipe.send(line) expected = 22.020096 name, args, kwargs = self.target.send.mock_calls[0] actual = float(args[0]) self.assertAlmostEqual(expected, actual) return # end class TestSumParser if __name__ == "__main__": import pudb; pudb.set_trace() parser = IperfParser() target = MagicMock() pipe = parser.pipe(target) for line in FRAGMENT: pipe.send(line) expected = 26.2 print target.send.mock_calls name, args, kwargs = target.send.mock_calls[0] actual = float(args[0])
AmericanResearchInstitute/poweru-server
refs/heads/master
cmis_storage/views.py
6027
# Create your views here.
EShamaev/ardupilot
refs/heads/master
Tools/scripts/runplanetest.py
37
#!/usr/bin/env python
# Smoke-test ArduPlane under SITL: start sim_vehicle.py, wait for the
# simulated aircraft, arm it and switch to AUTO, then hand the MAVProxy
# console over to the user.

import pexpect, time, sys
from pymavlink import mavutil


def wait_heartbeat(mav, timeout=10):
    '''wait for a heartbeat'''
    start_time = time.time()
    while time.time() < start_time+timeout:
        if mav.recv_match(type='HEARTBEAT', blocking=True, timeout=0.5) is not None:
            return
    raise Exception("Failed to get heartbeat")


def wait_mode(mav, modes, timeout=10):
    '''wait for one of a set of flight modes'''
    start_time = time.time()
    last_mode = None
    while time.time() < start_time+timeout:
        wait_heartbeat(mav, timeout=10)
        if mav.flightmode != last_mode:
            print("Flightmode %s" % mav.flightmode)
            last_mode = mav.flightmode
        if mav.flightmode in modes:
            return
    # Hard exit: this is a standalone test script, not a library.
    print("Failed to get mode from %s" % modes)
    sys.exit(1)


def wait_time(mav, simtime):
    '''wait for simulation time to pass'''
    # Use RAW_IMU's time_usec as the simulation clock.
    imu = mav.recv_match(type='RAW_IMU', blocking=True)
    t1 = imu.time_usec*1.0e-6
    while True:
        imu = mav.recv_match(type='RAW_IMU', blocking=True)
        t2 = imu.time_usec*1.0e-6
        if t2 - t1 > simtime:
            break


cmd = 'sim_vehicle.py -j4 -D -f plane'
mavproxy = pexpect.spawn(cmd, logfile=sys.stdout, timeout=30)
mavproxy.expect("Ground start complete")

mav = mavutil.mavlink_connection('127.0.0.1:14550')

wait_mode(mav, ['MANUAL'])

mavproxy.send('wp list\n')
mavproxy.expect('Requesting')
# Give the simulation time to settle before arming.
wait_time(mav, 30)
mavproxy.send('arm throttle\n')
mavproxy.send('auto\n')
wait_mode(mav, ['AUTO'])
mavproxy.send('module load console\n')
mavproxy.send('module load map\n')
mavproxy.send('map set showsimpos 1\n')
mavproxy.logfile = None
mavproxy.interact()
elba7r/builder
refs/heads/master
frappe/desk/doctype/todo/__init__.py
1829
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # MIT License. See license.txt from __future__ import unicode_literals
indhub/mxnet
refs/heads/master
python/mxnet/contrib/onnx/onnx2mx/import_model.py
5
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# coding: utf-8
"""import function"""
# pylint: disable=no-member

from .import_onnx import GraphProto


def import_model(model_file):
    """Imports the ONNX model file, passed as a parameter, into MXNet symbol
    and parameters.

    Operator support and coverage -
    https://cwiki.apache.org/confluence/display/MXNET/ONNX

    Parameters
    ----------
    model_file : str
        ONNX model file name

    Returns
    -------
    sym : :class:`~mxnet.symbol.Symbol`
        MXNet symbol object

    arg_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
        Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format

    aux_params : dict of ``str`` to :class:`~mxnet.ndarray.NDArray`
        Dict of converted parameters stored in ``mxnet.ndarray.NDArray`` format
    """
    graph = GraphProto()
    # onnx is an optional dependency: import lazily and fail with
    # installation instructions rather than at module import time.
    try:
        import onnx
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")
    # loads model file and returns ONNX protobuf object
    model_proto = onnx.load(model_file)
    sym, arg_params, aux_params = graph.from_onnx(model_proto.graph)
    return sym, arg_params, aux_params


def get_model_metadata(model_file):
    """Returns the name and shape information of input and output tensors of
    the given ONNX model file.

    Parameters
    ----------
    model_file : str
        ONNX model file name

    Returns
    -------
    model_metadata : dict
        A dictionary object mapping various metadata to its corresponding
        value.  The dictionary will have the following template::

            {
                'input_tensor_data' : <list of tuples representing the shape
                                       of the input parameters>,
                'output_tensor_data' : <list of tuples representing the shape
                                        of the output of the model>
            }
    """
    graph = GraphProto()
    try:
        import onnx
    except ImportError:
        raise ImportError("Onnx and protobuf need to be installed. "
                          + "Instructions to install - https://github.com/onnx/onnx")
    model_proto = onnx.load(model_file)
    metadata = graph.get_graph_metadata(model_proto.graph)
    return metadata
jonjonarnearne/smi2021
refs/heads/smi2021-kernel-4.1
tools/perf/scripts/python/netdev-times.py
1544
# Display a process of packets and processed time. # It helps us to investigate networking or network device. # # options # tx: show only tx chart # rx: show only rx chart # dev=: show only thing related to specified device # debug: work with debug mode. It shows buffer status. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * all_event_list = []; # insert all tracepoint event related with this script irq_dic = {}; # key is cpu and value is a list which stacks irqs # which raise NET_RX softirq net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry # and a list which stacks receive receive_hunk_list = []; # a list which include a sequence of receive events rx_skb_list = []; # received packet list for matching # skb_copy_datagram_iovec buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and # tx_xmit_list of_count_rx_skb_list = 0; # overflow count tx_queue_list = []; # list of packets which pass through dev_queue_xmit of_count_tx_queue_list = 0; # overflow count tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit of_count_tx_xmit_list = 0; # overflow count tx_free_list = []; # list of packets which is freed # options show_tx = 0; show_rx = 0; dev = 0; # store a name of device specified by option "dev=" debug = 0; # indices of event_info tuple EINFO_IDX_NAME= 0 EINFO_IDX_CONTEXT=1 EINFO_IDX_CPU= 2 EINFO_IDX_TIME= 3 EINFO_IDX_PID= 4 EINFO_IDX_COMM= 5 # Calculate a time interval(msec) from src(nsec) to dst(nsec) def diff_msec(src, dst): return (dst - src) / 1000000.0 # Display a process of transmitting a packet def print_transmit(hunk): if dev != 0 and hunk['dev'].find(dev) < 0: return print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \ (hunk['dev'], hunk['len'], nsecs_secs(hunk['queue_t']), nsecs_nsecs(hunk['queue_t'])/1000, diff_msec(hunk['queue_t'], hunk['xmit_t']), 
diff_msec(hunk['xmit_t'], hunk['free_t'])) # Format for displaying rx packet processing PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)" PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)" PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)" PF_JOINT= " |" PF_WJOINT= " | |" PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)" PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)" PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)" PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)" PF_CONS_SKB= " | consume_skb(+%.3fmsec)" # Display a process of received packets and interrputs associated with # a NET_RX softirq def print_receive(hunk): show_hunk = 0 irq_list = hunk['irq_list'] cpu = irq_list[0]['cpu'] base_t = irq_list[0]['irq_ent_t'] # check if this hunk should be showed if dev != 0: for i in range(len(irq_list)): if irq_list[i]['name'].find(dev) >= 0: show_hunk = 1 break else: show_hunk = 1 if show_hunk == 0: return print "%d.%06dsec cpu=%d" % \ (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu) for i in range(len(irq_list)): print PF_IRQ_ENTRY % \ (diff_msec(base_t, irq_list[i]['irq_ent_t']), irq_list[i]['irq'], irq_list[i]['name']) print PF_JOINT irq_event_list = irq_list[i]['event_list'] for j in range(len(irq_event_list)): irq_event = irq_event_list[j] if irq_event['event'] == 'netif_rx': print PF_NET_RX % \ (diff_msec(base_t, irq_event['time']), irq_event['skbaddr']) print PF_JOINT print PF_SOFT_ENTRY % \ diff_msec(base_t, hunk['sirq_ent_t']) print PF_JOINT event_list = hunk['event_list'] for i in range(len(event_list)): event = event_list[i] if event['event_name'] == 'napi_poll': print PF_NAPI_POLL % \ (diff_msec(base_t, event['event_t']), event['dev']) if i == len(event_list) - 1: print "" else: print PF_JOINT else: print PF_NET_RECV % \ (diff_msec(base_t, event['event_t']), event['skbaddr'], event['len']) if 'comm' in event.keys(): print PF_WJOINT print PF_CPY_DGRAM % \ (diff_msec(base_t, event['comm_t']), event['pid'], event['comm']) elif 'handle' in event.keys(): 
print PF_WJOINT if event['handle'] == "kfree_skb": print PF_KFREE_SKB % \ (diff_msec(base_t, event['comm_t']), event['location']) elif event['handle'] == "consume_skb": print PF_CONS_SKB % \ diff_msec(base_t, event['comm_t']) print PF_JOINT def trace_begin(): global show_tx global show_rx global dev global debug for i in range(len(sys.argv)): if i == 0: continue arg = sys.argv[i] if arg == 'tx': show_tx = 1 elif arg =='rx': show_rx = 1 elif arg.find('dev=',0, 4) >= 0: dev = arg[4:] elif arg == 'debug': debug = 1 if show_tx == 0 and show_rx == 0: show_tx = 1 show_rx = 1 def trace_end(): # order all events in time all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME], b[EINFO_IDX_TIME])) # process all events for i in range(len(all_event_list)): event_info = all_event_list[i] name = event_info[EINFO_IDX_NAME] if name == 'irq__softirq_exit': handle_irq_softirq_exit(event_info) elif name == 'irq__softirq_entry': handle_irq_softirq_entry(event_info) elif name == 'irq__softirq_raise': handle_irq_softirq_raise(event_info) elif name == 'irq__irq_handler_entry': handle_irq_handler_entry(event_info) elif name == 'irq__irq_handler_exit': handle_irq_handler_exit(event_info) elif name == 'napi__napi_poll': handle_napi_poll(event_info) elif name == 'net__netif_receive_skb': handle_netif_receive_skb(event_info) elif name == 'net__netif_rx': handle_netif_rx(event_info) elif name == 'skb__skb_copy_datagram_iovec': handle_skb_copy_datagram_iovec(event_info) elif name == 'net__net_dev_queue': handle_net_dev_queue(event_info) elif name == 'net__net_dev_xmit': handle_net_dev_xmit(event_info) elif name == 'skb__kfree_skb': handle_kfree_skb(event_info) elif name == 'skb__consume_skb': handle_consume_skb(event_info) # display receive hunks if show_rx: for i in range(len(receive_hunk_list)): print_receive(receive_hunk_list[i]) # display transmit hunks if show_tx: print " dev len Qdisc " \ " netdevice free" for i in range(len(tx_free_list)): print_transmit(tx_free_list[i]) if debug: print 
"debug buffer status" print "----------------------------" print "xmit Qdisc:remain:%d overflow:%d" % \ (len(tx_queue_list), of_count_tx_queue_list) print "xmit netdevice:remain:%d overflow:%d" % \ (len(tx_xmit_list), of_count_tx_xmit_list) print "receive:remain:%d overflow:%d" % \ (len(rx_skb_list), of_count_rx_skb_list) # called from perf, when it finds a correspoinding event def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec): if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX": return event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec) all_event_list.append(event_info) def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm, callchain, irq, irq_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, irq_name) all_event_list.append(event_info) def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret) all_event_list.append(event_info) def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, napi, dev_name) all_event_list.append(event_info) def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__netif_rx(name, 
context, cpu, sec, nsec, pid, comm, callchain, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, skblen, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, dev_name) all_event_list.append(event_info) def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, skblen, rc, dev_name): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen, rc ,dev_name) all_event_list.append(event_info) def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, protocol, location): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, protocol, location) all_event_list.append(event_info) def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr) all_event_list.append(event_info) def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr, skblen): event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, skbaddr, skblen) all_event_list.append(event_info) def handle_irq_handler_entry(event_info): (name, context, cpu, time, pid, comm, irq, irq_name) = event_info if cpu not in irq_dic.keys(): irq_dic[cpu] = [] irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time} irq_dic[cpu].append(irq_record) def handle_irq_handler_exit(event_info): (name, context, cpu, time, pid, comm, irq, ret) = event_info if cpu not in irq_dic.keys(): return irq_record = irq_dic[cpu].pop() if irq != irq_record['irq']: return irq_record.update({'irq_ext_t':time}) # if an irq doesn't include NET_RX softirq, drop. 
if 'event_list' in irq_record.keys(): irq_dic[cpu].append(irq_record) def handle_irq_softirq_raise(event_info): (name, context, cpu, time, pid, comm, vec) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'sirq_raise'}) irq_record.update({'event_list':irq_event_list}) irq_dic[cpu].append(irq_record) def handle_irq_softirq_entry(event_info): (name, context, cpu, time, pid, comm, vec) = event_info net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]} def handle_irq_softirq_exit(event_info): (name, context, cpu, time, pid, comm, vec) = event_info irq_list = [] event_list = 0 if cpu in irq_dic.keys(): irq_list = irq_dic[cpu] del irq_dic[cpu] if cpu in net_rx_dic.keys(): sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t'] event_list = net_rx_dic[cpu]['event_list'] del net_rx_dic[cpu] if irq_list == [] or event_list == 0: return rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time, 'irq_list':irq_list, 'event_list':event_list} # merge information realted to a NET_RX softirq receive_hunk_list.append(rec_data) def handle_napi_poll(event_info): (name, context, cpu, time, pid, comm, napi, dev_name) = event_info if cpu in net_rx_dic.keys(): event_list = net_rx_dic[cpu]['event_list'] rec_data = {'event_name':'napi_poll', 'dev':dev_name, 'event_t':time} event_list.append(rec_data) def handle_netif_rx(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu not in irq_dic.keys() \ or len(irq_dic[cpu]) == 0: return irq_record = irq_dic[cpu].pop() if 'event_list' in irq_record.keys(): irq_event_list = irq_record['event_list'] else: irq_event_list = [] irq_event_list.append({'time':time, 'event':'netif_rx', 'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name}) irq_record.update({'event_list':irq_event_list}) 
irq_dic[cpu].append(irq_record) def handle_netif_receive_skb(event_info): global of_count_rx_skb_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info if cpu in net_rx_dic.keys(): rec_data = {'event_name':'netif_receive_skb', 'event_t':time, 'skbaddr':skbaddr, 'len':skblen} event_list = net_rx_dic[cpu]['event_list'] event_list.append(rec_data) rx_skb_list.insert(0, rec_data) if len(rx_skb_list) > buffer_budget: rx_skb_list.pop() of_count_rx_skb_list += 1 def handle_net_dev_queue(event_info): global of_count_tx_queue_list (name, context, cpu, time, pid, comm, skbaddr, skblen, dev_name) = event_info skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time} tx_queue_list.insert(0, skb) if len(tx_queue_list) > buffer_budget: tx_queue_list.pop() of_count_tx_queue_list += 1 def handle_net_dev_xmit(event_info): global of_count_tx_xmit_list (name, context, cpu, time, pid, comm, skbaddr, skblen, rc, dev_name) = event_info if rc == 0: # NETDEV_TX_OK for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: skb['xmit_t'] = time tx_xmit_list.insert(0, skb) del tx_queue_list[i] if len(tx_xmit_list) > buffer_budget: tx_xmit_list.pop() of_count_tx_xmit_list += 1 return def handle_kfree_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr, protocol, location) = event_info for i in range(len(tx_queue_list)): skb = tx_queue_list[i] if skb['skbaddr'] == skbaddr: del tx_queue_list[i] return for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if rec_data['skbaddr'] == skbaddr: rec_data.update({'handle':"kfree_skb", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return def handle_consume_skb(event_info): (name, context, cpu, time, pid, comm, skbaddr) = event_info for i in range(len(tx_xmit_list)): skb = tx_xmit_list[i] if 
skb['skbaddr'] == skbaddr: skb['free_t'] = time tx_free_list.append(skb) del tx_xmit_list[i] return def handle_skb_copy_datagram_iovec(event_info): (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info for i in range(len(rx_skb_list)): rec_data = rx_skb_list[i] if skbaddr == rec_data['skbaddr']: rec_data.update({'handle':"skb_copy_datagram_iovec", 'comm':comm, 'pid':pid, 'comm_t':time}) del rx_skb_list[i] return
Azure/azure-sdk-for-python
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
sdk/baremetalinfrastructure/azure-mgmt-baremetalinfrastructure/azure/mgmt/baremetalinfrastructure/_version.py
9
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- VERSION = "1.0.0b2"
jackkiej/SickRage
refs/heads/master
lib/pgi/obj.py
18
# Copyright 2012, 2013 Christoph Reiter # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. import itertools from ctypes import cast, addressof import weakref from .clib import gobject from .clib.gobject import GClosureNotify, signal_connect_data from .clib.gobject import signal_handler_unblock, signal_handler_block from .clib.gobject import GConnectFlags, signal_handler_disconnect from .clib.gir import GIFunctionInfoFlags from .util import import_attribute, escape_identifier from .gtype import PGType from .properties import PropertyAttribute, PROPS_NAME from .field import FieldAttribute from .constant import ConstantAttribute from .signals import SignalsAttribute from .codegen import generate_function, generate_constructor from .codegen import generate_signal_callback, generate_dummy_callable from ._compat import PY3 class Object(object): __gtype__ = None _obj = 0 __weak = {} _constructors = None __signal_cb_ref = {} def __init__(self, **kwargs): gtype = self.__gtype__ if gtype.is_abstract(): raise TypeError("Cannot create instance of abstract type %r" % gtype.name) names = kwargs.keys() obj = self._generate_constructor(tuple(names))(*kwargs.values()) # sink unowned objects if self._unowned: gobject.ref_sink(obj) self.__weak[weakref.ref(self, self.__destroy)] = obj self._obj = obj @classmethod def _generate_constructor(cls, names): """Get a hopefully cache constructor""" cache = cls._constructors if names in cache: return cache[names] elif len(cache) > 3: cache.clear() func = generate_constructor(cls, names) cache[names] = func return func def set_property(self, name, value): """set_property(property_name: str, value: object) Set property *property_name* to *value*. 
""" if not hasattr(self.props, name): raise TypeError("Unknown property: %r" % name) setattr(self.props, name, value) def get_property(self, name): """get_property(property_name: str) -> object Retrieves a property value. """ if not hasattr(self.props, name): raise TypeError("Unknown property: %r" % name) return getattr(self.props, name) def _ref(self): gobject.ref_sink(self._obj) @classmethod def __destroy(cls, ref): gobject.unref(cls.__weak.pop(ref)) @property def __grefcount__(self): return cast(self._obj, gobject.GObjectPtr).contents.ref_count def __get_signal(self, name): name = name.replace("_", "-") for base in type(self).__mro__[:-1]: if base is InterfaceBase: continue if "__sigs__" in base.__dict__: if name in base.__sigs__: return base.__sigs__[name] def __connect(self, flags, name, callback, *user_args): if not callable(callback): raise TypeError("second argument must be callable") info = self.__get_signal(name) if not info: raise TypeError("unknown signal name %r" % name) def _add_self(*args): return callback(self, *itertools.chain(args, user_args)) cb = generate_signal_callback(info)(_add_self) destroy = GClosureNotify() if PY3: name = name.encode("ascii") id_ = signal_connect_data(self._obj, name, cb, None, destroy, flags) self.__signal_cb_ref[id_] = (cb, destroy) return id_ def connect(self, detailed_signal, handler, *args): """connect(detailed_signal: str, handler: function, *args) -> handler_id: int The connect() method adds a function or method (handler) to the end of the list of signal handlers for the named detailed_signal but before the default class signal handler. An optional set of parameters may be specified after the handler parameter. These will all be passed to the signal handler when invoked. 
For example if a function handler was connected to a signal using:: handler_id = object.connect("signal_name", handler, arg1, arg2, arg3) The handler should be defined as:: def handler(object, arg1, arg2, arg3): A method handler connected to a signal using:: handler_id = object.connect("signal_name", self.handler, arg1, arg2) requires an additional argument when defined:: def handler(self, object, arg1, arg2) A TypeError exception is raised if detailed_signal identifies a signal name that is not associated with the object. """ return self.__connect(0, detailed_signal, handler, *args) def connect_after(self, detailed_signal, handler, *args): """connect_after(detailed_signal: str, handler: function, *args) -> handler_id: int The connect_after() method is similar to the connect() method except that the handler is added to the signal handler list after the default class signal handler. Otherwise the details of handler definition and invocation are the same. """ flags = GConnectFlags.CONNECT_AFTER return self.__connect(flags, detailed_signal, handler, *args) def disconnect(self, id_): if id_ in self.__signal_cb_ref: signal_handler_disconnect(self._obj, id_) del self.__signal_cb_ref[id_] def handler_block(self, handler_id): """handler_block(handler_id: int) -> None Blocks a handler of an instance so it will not be called during any signal emissions unless :meth:`handler_unblock` is called for that *handler_id*. Thus "blocking" a signal handler means to temporarily deactivate it, a signal handler has to be unblocked exactly the same amount of times it has been blocked before to become active again. It is recommended to use :meth:`handler_block` in conjunction with the *with* statement which will call :meth:`handler_unblock` implicitly at the end of the block:: with an_object.handler_block(handler_id): # Do your work here ... 
""" signal_handler_block(self._obj, handler_id) def handler_unblock(self, handler_id): """handler_unblock(handler_id: int) -> None""" signal_handler_unblock(self._obj, handler_id) def emit(self, signal_name, *args): """emit(signal_name: str, *args) -> None Emit signal *signal_name*. Signal arguments must follow, e.g. if your signal is of type ``(int,)``, it must be emitted with:: self.emit(signal_name, 42) """ raise NotImplementedError def freeze_notify(self): """freeze_notify() -> None This method freezes all the "notify::" signals (which are emitted when any property is changed) until the :meth:`thaw_notify` method is called. It recommended to use the *with* statement when calling :meth:`freeze_notify`, that way it is ensured that :meth:`thaw_notify` is called implicitly at the end of the block:: with an_object.freeze_notify(): # Do your work here ... """ raise NotImplementedError def thaw_notify(self): """thaw_notify() -> None Thaw all the "notify::" signals which were thawed by :meth:`freeze_notify`. It is recommended to not call :meth:`thaw_notify` explicitly but use :meth:`freeze_notify` together with the *with* statement. 
""" raise NotImplementedError def __hash__(self): return hash(self._obj) def __eq__(self, other): return self._obj == other._obj def __cmp__(self, other): return cmp(self._obj, other._obj) def __repr__(self): form = "<%s object at 0x%x (%s at 0x%x)>" name = type(self).__name__ return form % (name, id(self), self.__gtype__.name, self._obj) class MethodAttribute(object): def __init__(self, info, real_owner, name): super(MethodAttribute, self).__init__() self._info = info self._name = name self._real_owner = real_owner def __get__(self, instance, owner): info = self._info real_owner = self._real_owner flags = info.flags func_flags = flags.value name = self._name func_flags = func_flags & (~GIFunctionInfoFlags.THROWS) if func_flags & GIFunctionInfoFlags.IS_METHOD: func = generate_function(info, method=True) setattr(real_owner, name, func) return getattr(instance or owner, name) elif not func_flags or func_flags & GIFunctionInfoFlags.IS_CONSTRUCTOR: func = generate_function(info, method=False) func = staticmethod(func) setattr(real_owner, name, func) return getattr(owner, name) else: raise NotImplementedError("%r not supported" % flags) class VirtualMethodAttribute(object): def __init__(self, info, real_owner, name): super(VirtualMethodAttribute, self).__init__() self._info = info self._name = name self._real_owner = real_owner def __get__(self, instance, owner): info = self._info real_owner = self._real_owner name = self._name # fixme: generate_callback just gives us a docstring func = generate_dummy_callable(info, name, method=True) func._is_virtual = True setattr(real_owner, name, func) return getattr(instance or owner, name) def add_method(info, target_cls, virtual=False, dont_replace=False): """Add a method to the target class""" # escape before prefixing, like pygobject name = escape_identifier(info.name) if virtual: name = "do_" + name attr = VirtualMethodAttribute(info, target_cls, name) else: attr = MethodAttribute(info, target_cls, name) if dont_replace and 
hasattr(target_cls, name): return setattr(target_cls, name, attr) class InterfaceBase(object): @classmethod def _get_iface_struct(*args): return InterfaceBase.__module__ = "GObject" InterfaceBase.__name__ = "GInterface" class _Interface(object): def __init__(self): raise NotImplementedError("Interface can not be constructed") def InterfaceAttribute(iface_info): """Creates a GInterface class""" # Create a new class cls = type(iface_info.name, (InterfaceBase,), dict(_Interface.__dict__)) cls.__module__ = iface_info.namespace # GType cls.__gtype__ = PGType(iface_info.g_type) # Properties cls.props = PropertyAttribute(iface_info) # Signals cls.signals = SignalsAttribute(iface_info) # Add constants for constant in iface_info.get_constants(): constant_name = constant.name attr = ConstantAttribute(constant) setattr(cls, constant_name, attr) # Add methods for method_info in iface_info.get_methods(): add_method(method_info, cls) # VFuncs for vfunc_info in iface_info.get_vfuncs(): add_method(vfunc_info, cls, virtual=True) cls._sigs = {} is_info = iface_info.get_iface_struct() if is_info: iface_struct = import_attribute(is_info.namespace, is_info.name) else: iface_struct = None def get_iface_struct(cls): if not iface_struct: return None ptr = cls.__gtype__._type.default_interface_ref() if not ptr: return None return iface_struct._from_pointer(addressof(ptr.contents)) setattr(cls, "_get_iface_struct", classmethod(get_iface_struct)) return cls def new_class_from_gtype(gtype): """Create a new class for a gtype not in the gir. The caller is responsible for caching etc. 
""" if gtype.is_a(PGType.from_name("GObject")): parent = gtype.parent.pytype if parent is None or parent == PGType.from_name("void"): return interfaces = [i.pytype for i in gtype.interfaces] bases = tuple([parent] + interfaces) cls = type(gtype.name, bases, dict()) cls.__gtype__ = gtype return cls elif gtype.is_a(PGType.from_name("GEnum")): from pgi.enum import GEnumBase return GEnumBase def ObjectAttribute(obj_info): """Creates a GObject class. It inherits from the base class and all interfaces it implements. """ if obj_info.name == "Object" and obj_info.namespace == "GObject": cls = Object else: # Get the parent class parent_obj = obj_info.get_parent() if parent_obj: attr = import_attribute(parent_obj.namespace, parent_obj.name) bases = (attr,) else: bases = (object,) # Get all object interfaces ifaces = [] for interface in obj_info.get_interfaces(): attr = import_attribute(interface.namespace, interface.name) # only add interfaces if the base classes don't have it for base in bases: if attr in base.__mro__: break else: ifaces.append(attr) # Combine them to a base class list if ifaces: bases = tuple(list(bases) + ifaces) # Create a new class cls = type(obj_info.name, bases, dict()) cls.__module__ = obj_info.namespace # Set root to unowned= False and InitiallyUnowned=True if obj_info.namespace == "GObject": if obj_info.name == "InitiallyUnowned": cls._unowned = True elif obj_info.name == "Object": cls._unowned = False # GType cls.__gtype__ = PGType(obj_info.g_type) if not obj_info.fundamental: # Constructor cache cls._constructors = {} # Properties setattr(cls, PROPS_NAME, PropertyAttribute(obj_info)) # Signals cls.signals = SignalsAttribute(obj_info) # Signals cls.__sigs__ = {} for sig_info in obj_info.get_signals(): signal_name = sig_info.name cls.__sigs__[signal_name] = sig_info # Add constants for constant in obj_info.get_constants(): constant_name = constant.name attr = ConstantAttribute(constant) setattr(cls, constant_name, attr) # Fields for field in 
obj_info.get_fields(): field_name = escape_identifier(field.name) attr = FieldAttribute(field_name, field) setattr(cls, field_name, attr) # Add methods for method_info in obj_info.get_methods(): # we implement most of the base object ourself add_method(method_info, cls, dont_replace=cls is Object) # VFuncs for vfunc_info in obj_info.get_vfuncs(): add_method(vfunc_info, cls, virtual=True) cs_info = obj_info.get_class_struct() if cs_info: class_struct = import_attribute(cs_info.namespace, cs_info.name) else: class_struct = None # XXX ^ 2 def get_class_struct(cls, type_=None): """Returns the class struct casted to the passed type""" if type_ is None: type_ = class_struct if type_ is None: return None ptr = cls.__gtype__._type.class_ref() return type_._from_pointer(ptr) setattr(cls, "_get_class_struct", classmethod(get_class_struct)) return cls
laszlocsomor/tensorflow
refs/heads/master
tensorflow/contrib/mpi_collectives/mpi_allgather_test.py
69
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import numpy as np import tensorflow as tf import tensorflow.contrib.mpi_collectives as mpi from tensorflow.python.platform import test average_allgather = False class AllgatherTest(test.TestCase): def checkAllgather(self, num_ranks, all_gathered, local_gathered): # Ensure that indices match. all_gat_ind = np.sort(all_gathered.indices) loc_gat_ind = np.sort(local_gathered.indices) assert(len(loc_gat_ind) == len(all_gat_ind)) for i in range(len(loc_gat_ind)): assert(loc_gat_ind[i] == all_gat_ind[i]) # For each index, verify same values. local_checked = [] for i in range(len(local_gathered.indices)): local_checked.append(False) for i in range(len(all_gathered.indices)): all_index = all_gathered.indices[i] # TODO(jthestness): Make this lookup quicker using sorting. 
loc_index = -1 for j in range(len(local_gathered.indices)): if local_gathered.indices[j] == all_index and not local_checked[j]: loc_index = j local_checked[j] = True break assert(loc_index >= 0) correct_output = local_gathered.values[loc_index][0] if average_allgather: correct_output = correct_output / float(num_ranks) assert(all_gathered.values[i][0] == correct_output) def test_mpi_allgather(self): # Get MPI rank my_rank = int(os.environ['PMI_RANK']) num_ranks = int(os.environ['PMI_SIZE']) indices_per_rank = 100 tensor_width = 10 # Create IndexedSlices for each rank, some with overlapping indices. to_gather_indices = [] to_gather_values = [] to_gather = [] for rank_id in range(num_ranks): indices = [] values = [] my_multiple = rank_id + 1 current_index = my_multiple for i in range(indices_per_rank): indices.append(current_index) ones_tensor = tf.ones([tensor_width]) values.append(tf.multiply(ones_tensor, tf.fill(ones_tensor.get_shape(), float(current_index)))) current_index += my_multiple concat_ind = tf.stack(indices) concat_vals = tf.stack(values) to_gather_indices.append(concat_ind) to_gather_values.append(concat_vals) to_gather.append(tf.IndexedSlices(concat_vals, concat_ind)) # Collect the local IndexedSlices (indices and values) to create # correct IndexedSlices output. correct_gather_indices = tf.concat(to_gather_indices, 0) correct_gather_values = tf.concat(to_gather_values, 0) correct_gather = tf.IndexedSlices(correct_gather_values, correct_gather_indices) all_gather = mpi.allreduce(to_gather[my_rank], average_allgather) # NOTE: This assumes that device IDs are numbered the same as ranks. gpu_options = tf.GPUOptions(visible_device_list=str(my_rank)) config = tf.ConfigProto(gpu_options=gpu_options) # MPI Session to test allgather. with mpi.Session(config=config) as sess: sess.run(tf.global_variables_initializer()) all_gathered, local_gathered = sess.run([all_gather, correct_gather]) # Compare all_gathered with local_gathered. 
self.checkAllgather(num_ranks, all_gathered, local_gathered) if __name__ == '__main__': test.main()
ssbarnea/ansible
refs/heads/devel
test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py
6
# -*- coding: utf-8 -*- # # Copyright (C) 2015 Matt Martz <matt@sivel.net> # Copyright (C) 2015 Rackspace US, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import abc import argparse import ast import datetime import json import errno import os import re import subprocess import sys import tempfile import traceback from collections import OrderedDict from contextlib import contextmanager from distutils.version import StrictVersion, LooseVersion from fnmatch import fnmatch import yaml from ansible import __version__ as ansible_version from ansible.executor.module_common import REPLACER_WINDOWS from ansible.module_utils.common._collections_compat import Mapping from ansible.plugins.loader import fragment_loader from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder from ansible.utils.plugin_docs import REJECTLIST, add_collection_to_versions_and_dates, add_fragments, get_docstring from ansible.utils.version import SemanticVersion from .module_args import AnsibleModuleImportError, AnsibleModuleNotInitialized, get_argument_spec from .schema import ansible_module_kwargs_schema, doc_schema, return_schema from .utils import CaptureStd, NoArgsAnsibleModule, compare_unordered_lists, is_empty, parse_yaml, parse_isodate from voluptuous.humanize import humanize_error from 
ansible.module_utils.six import PY3, with_metaclass, string_types if PY3: # Because there is no ast.TryExcept in Python 3 ast module TRY_EXCEPT = ast.Try # REPLACER_WINDOWS from ansible.executor.module_common is byte # string but we need unicode for Python 3 REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8') else: TRY_EXCEPT = ast.TryExcept REJECTLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea')) INDENT_REGEX = re.compile(r'([\t]*)') TYPE_REGEX = re.compile(r'.*(if|or)(\s+[^"\']*|\s+)(?<!_)(?<!str\()type\([^)].*') SYS_EXIT_REGEX = re.compile(r'[^#]*sys.exit\s*\(.*') REJECTLIST_IMPORTS = { 'requests': { 'new_only': True, 'error': { 'code': 'use-module-utils-urls', 'msg': ('requests import found, should use ' 'ansible.module_utils.urls instead') } }, r'boto(?:\.|$)': { 'new_only': True, 'error': { 'code': 'use-boto3', 'msg': 'boto import found, new modules should use boto3' } }, } SUBPROCESS_REGEX = re.compile(r'subprocess\.Po.*') OS_CALL_REGEX = re.compile(r'os\.call.*') LOOSE_ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version.split('.')[:3])) def compare_dates(d1, d2): try: date1 = parse_isodate(d1, allow_date=True) date2 = parse_isodate(d2, allow_date=True) return date1 == date2 except ValueError: # At least one of d1 and d2 cannot be parsed. Simply compare values. 
return d1 == d2 class ReporterEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, Exception): return str(o) return json.JSONEncoder.default(self, o) class Reporter: def __init__(self): self.files = OrderedDict() def _ensure_default_entry(self, path): try: self.files[path] except KeyError: self.files[path] = { 'errors': [], 'warnings': [], 'traces': [], 'warning_traces': [] } def _log(self, path, code, msg, level='error', line=0, column=0): self._ensure_default_entry(path) lvl_dct = self.files[path]['%ss' % level] lvl_dct.append({ 'code': code, 'msg': msg, 'line': line, 'column': column }) def error(self, *args, **kwargs): self._log(*args, level='error', **kwargs) def warning(self, *args, **kwargs): self._log(*args, level='warning', **kwargs) def trace(self, path, tracebk): self._ensure_default_entry(path) self.files[path]['traces'].append(tracebk) def warning_trace(self, path, tracebk): self._ensure_default_entry(path) self.files[path]['warning_traces'].append(tracebk) @staticmethod @contextmanager def _output_handle(output): if output != '-': handle = open(output, 'w+') else: handle = sys.stdout yield handle handle.flush() handle.close() @staticmethod def _filter_out_ok(reports): temp_reports = OrderedDict() for path, report in reports.items(): if report['errors'] or report['warnings']: temp_reports[path] = report return temp_reports def plain(self, warnings=False, output='-'): """Print out the test results in plain format output is ignored here for now """ ret = [] for path, report in Reporter._filter_out_ok(self.files).items(): traces = report['traces'][:] if warnings and report['warnings']: traces.extend(report['warning_traces']) for trace in traces: print('TRACE:') print('\n '.join((' %s' % trace).splitlines())) for error in report['errors']: error['path'] = path print('%(path)s:%(line)d:%(column)d: E%(code)s %(msg)s' % error) ret.append(1) if warnings: for warning in report['warnings']: warning['path'] = path print('%(path)s:%(line)d:%(column)d: 
W%(code)s %(msg)s' % warning) return 3 if ret else 0 def json(self, warnings=False, output='-'): """Print out the test results in json format warnings is not respected in this output """ ret = [len(r['errors']) for r in self.files.values()] with Reporter._output_handle(output) as handle: print(json.dumps(Reporter._filter_out_ok(self.files), indent=4, cls=ReporterEncoder), file=handle) return 3 if sum(ret) else 0 class Validator(with_metaclass(abc.ABCMeta, object)): """Validator instances are intended to be run on a single object. if you are scanning multiple objects for problems, you'll want to have a separate Validator for each one.""" def __init__(self, reporter=None): self.reporter = reporter @abc.abstractproperty def object_name(self): """Name of the object we validated""" pass @abc.abstractproperty def object_path(self): """Path of the object we validated""" pass @abc.abstractmethod def validate(self): """Run this method to generate the test results""" pass class ModuleValidator(Validator): REJECTLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt') REJECTLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml', 'shippable.yml', '.gitattributes', '.gitmodules', 'COPYING', '__init__.py', 'VERSION', 'test-docs.sh')) REJECTLIST = REJECTLIST_FILES.union(REJECTLIST['MODULE']) PS_DOC_REJECTLIST = frozenset(( 'async_status.ps1', 'slurp.ps1', 'setup.ps1' )) # win_dsc is a dynamic arg spec, the docs won't ever match PS_ARG_VALIDATE_REJECTLIST = frozenset(('win_dsc.ps1', )) ACCEPTLIST_FUTURE_IMPORTS = frozenset(('absolute_import', 'division', 'print_function')) def __init__(self, path, analyze_arg_spec=False, collection=None, collection_version=None, base_branch=None, git_cache=None, reporter=None, routing=None): super(ModuleValidator, self).__init__(reporter=reporter or Reporter()) self.path = path self.basename = os.path.basename(self.path) self.name = os.path.splitext(self.basename)[0] self.analyze_arg_spec = analyze_arg_spec self._Version = 
LooseVersion self._StrictVersion = StrictVersion self.collection = collection self.collection_name = 'ansible.builtin' if self.collection: self._Version = SemanticVersion self._StrictVersion = SemanticVersion collection_namespace_path, collection_name = os.path.split(self.collection) self.collection_name = '%s.%s' % (os.path.basename(collection_namespace_path), collection_name) self.routing = routing self.collection_version = None if collection_version is not None: self.collection_version_str = collection_version self.collection_version = SemanticVersion(collection_version) self.base_branch = base_branch self.git_cache = git_cache or GitCache() self._python_module_override = False with open(path) as f: self.text = f.read() self.length = len(self.text.splitlines()) try: self.ast = ast.parse(self.text) except Exception: self.ast = None if base_branch: self.base_module = self._get_base_file() else: self.base_module = None def _create_version(self, v, collection_name=None): if not v: raise ValueError('Empty string is not a valid version') if collection_name == 'ansible.builtin': return LooseVersion(v) if collection_name is not None: return SemanticVersion(v) return self._Version(v) def _create_strict_version(self, v, collection_name=None): if not v: raise ValueError('Empty string is not a valid version') if collection_name == 'ansible.builtin': return StrictVersion(v) if collection_name is not None: return SemanticVersion(v) return self._StrictVersion(v) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): if not self.base_module: return try: os.remove(self.base_module) except Exception: pass @property def object_name(self): return self.basename @property def object_path(self): return self.path def _get_collection_meta(self): """Implement if we need this for version_added comparisons """ pass def _python_module(self): if self.path.endswith('.py') or self._python_module_override: return True return False def _powershell_module(self): if 
self.path.endswith('.ps1'): return True return False def _just_docs(self): """Module can contain just docs and from __future__ boilerplate """ try: for child in self.ast.body: if not isinstance(child, ast.Assign): # allowed from __future__ imports if isinstance(child, ast.ImportFrom) and child.module == '__future__': for future_import in child.names: if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS: break else: continue return False return True except AttributeError: return False def _get_base_branch_module_path(self): """List all paths within lib/ansible/modules to try and match a moved module""" return self.git_cache.base_module_paths.get(self.object_name) def _has_alias(self): """Return true if the module has any aliases.""" return self.object_name in self.git_cache.head_aliased_modules def _get_base_file(self): # In case of module moves, look for the original location base_path = self._get_base_branch_module_path() command = ['git', 'show', '%s:%s' % (self.base_branch, base_path or self.path)] p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if int(p.returncode) != 0: return None t = tempfile.NamedTemporaryFile(delete=False) t.write(stdout) t.close() return t.name def _is_new_module(self): if self._has_alias(): return False return not self.object_name.startswith('_') and bool(self.base_branch) and not bool(self.base_module) def _check_interpreter(self, powershell=False): if powershell: if not self.text.startswith('#!powershell\n'): self.reporter.error( path=self.object_path, code='missing-powershell-interpreter', msg='Interpreter line is not "#!powershell"' ) return if not self.text.startswith('#!/usr/bin/python'): self.reporter.error( path=self.object_path, code='missing-python-interpreter', msg='Interpreter line is not "#!/usr/bin/python"', ) def _check_type_instead_of_isinstance(self, powershell=False): if powershell: return for line_no, line in enumerate(self.text.splitlines()): 
typekeyword = TYPE_REGEX.match(line) if typekeyword: # TODO: add column self.reporter.error( path=self.object_path, code='unidiomatic-typecheck', msg=('Type comparison using type() found. ' 'Use isinstance() instead'), line=line_no + 1 ) def _check_for_sys_exit(self): # Optimize out the happy path if 'sys.exit' not in self.text: return for line_no, line in enumerate(self.text.splitlines()): sys_exit_usage = SYS_EXIT_REGEX.match(line) if sys_exit_usage: # TODO: add column self.reporter.error( path=self.object_path, code='use-fail-json-not-sys-exit', msg='sys.exit() call found. Should be exit_json/fail_json', line=line_no + 1 ) def _check_gpl3_header(self): header = '\n'.join(self.text.split('\n')[:20]) if ('GNU General Public License' not in header or ('version 3' not in header and 'v3.0' not in header)): self.reporter.error( path=self.object_path, code='missing-gplv3-license', msg='GPLv3 license header not found in the first 20 lines of the module' ) elif self._is_new_module(): if len([line for line in header if 'GNU General Public License' in line]) > 1: self.reporter.error( path=self.object_path, code='use-short-gplv3-license', msg='Found old style GPLv3 license header: ' 'https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#copyright' ) def _check_for_subprocess(self): for child in self.ast.body: if isinstance(child, ast.Import): if child.names[0].name == 'subprocess': for line_no, line in enumerate(self.text.splitlines()): sp_match = SUBPROCESS_REGEX.search(line) if sp_match: self.reporter.error( path=self.object_path, code='use-run-command-not-popen', msg=('subprocess.Popen call found. 
Should be module.run_command'), line=(line_no + 1), column=(sp_match.span()[0] + 1) ) def _check_for_os_call(self): if 'os.call' in self.text: for line_no, line in enumerate(self.text.splitlines()): os_call_match = OS_CALL_REGEX.search(line) if os_call_match: self.reporter.error( path=self.object_path, code='use-run-command-not-os-call', msg=('os.call() call found. Should be module.run_command'), line=(line_no + 1), column=(os_call_match.span()[0] + 1) ) def _find_blacklist_imports(self): for child in self.ast.body: names = [] if isinstance(child, ast.Import): names.extend(child.names) elif isinstance(child, TRY_EXCEPT): bodies = child.body for handler in child.handlers: bodies.extend(handler.body) for grandchild in bodies: if isinstance(grandchild, ast.Import): names.extend(grandchild.names) for name in names: # TODO: Add line/col for blacklist_import, options in REJECTLIST_IMPORTS.items(): if re.search(blacklist_import, name.name): new_only = options['new_only'] if self._is_new_module() and new_only: self.reporter.error( path=self.object_path, **options['error'] ) elif not new_only: self.reporter.error( path=self.object_path, **options['error'] ) def _find_module_utils(self, main): linenos = [] found_basic = False for child in self.ast.body: if isinstance(child, (ast.Import, ast.ImportFrom)): names = [] try: names.append(child.module) if child.module.endswith('.basic'): found_basic = True except AttributeError: pass names.extend([n.name for n in child.names]) if [n for n in names if n.startswith('ansible.module_utils')]: linenos.append(child.lineno) for name in child.names: if ('module_utils' in getattr(child, 'module', '') and isinstance(name, ast.alias) and name.name == '*'): msg = ( 'module-utils-specific-import', ('module_utils imports should import specific ' 'components, not "*"') ) if self._is_new_module(): self.reporter.error( path=self.object_path, code=msg[0], msg=msg[1], line=child.lineno ) else: self.reporter.warning( path=self.object_path, 
code=msg[0], msg=msg[1], line=child.lineno ) if (isinstance(name, ast.alias) and name.name == 'basic'): found_basic = True if not found_basic: self.reporter.warning( path=self.object_path, code='missing-module-utils-basic-import', msg='Did not find "ansible.module_utils.basic" import' ) return linenos def _get_first_callable(self): linenos = [] for child in self.ast.body: if isinstance(child, (ast.FunctionDef, ast.ClassDef)): linenos.append(child.lineno) return min(linenos) def _find_main_call(self, look_for="main"): """ Ensure that the module ends with: if __name__ == '__main__': main() OR, in the case of modules that are in the docs-only deprecation phase if __name__ == '__main__': removed_module() """ lineno = False if_bodies = [] for child in self.ast.body: if isinstance(child, ast.If): try: if child.test.left.id == '__name__': if_bodies.extend(child.body) except AttributeError: pass bodies = self.ast.body bodies.extend(if_bodies) for child in bodies: # validate that the next to last line is 'if __name__ == "__main__"' if child.lineno == (self.length - 1): mainchecked = False try: if isinstance(child, ast.If) and \ child.test.left.id == '__name__' and \ len(child.test.ops) == 1 and \ isinstance(child.test.ops[0], ast.Eq) and \ child.test.comparators[0].s == '__main__': mainchecked = True except Exception: pass if not mainchecked: self.reporter.error( path=self.object_path, code='missing-if-name-main', msg='Next to last line should be: if __name__ == "__main__":', line=child.lineno ) # validate that the final line is a call to main() if isinstance(child, ast.Expr): if isinstance(child.value, ast.Call): if (isinstance(child.value.func, ast.Name) and child.value.func.id == look_for): lineno = child.lineno if lineno < self.length - 1: self.reporter.error( path=self.object_path, code='last-line-main-call', msg=('Call to %s() not the last line' % look_for), line=lineno ) if not lineno: self.reporter.error( path=self.object_path, code='missing-main-call', msg=('Did 
not find a call to %s()' % look_for) ) return lineno or 0 def _find_has_import(self): for child in self.ast.body: found_try_except_import = False found_has = False if isinstance(child, TRY_EXCEPT): bodies = child.body for handler in child.handlers: bodies.extend(handler.body) for grandchild in bodies: if isinstance(grandchild, ast.Import): found_try_except_import = True if isinstance(grandchild, ast.Assign): for target in grandchild.targets: if target.id.lower().startswith('has_'): found_has = True if found_try_except_import and not found_has: # TODO: Add line/col self.reporter.warning( path=self.object_path, code='try-except-missing-has', msg='Found Try/Except block without HAS_ assignment' ) def _ensure_imports_below_docs(self, doc_info, first_callable): try: min_doc_line = min( [doc_info[key]['lineno'] for key in doc_info if doc_info[key]['lineno']] ) except ValueError: # We can't perform this validation, as there are no DOCs provided at all return max_doc_line = max( [doc_info[key]['end_lineno'] for key in doc_info if doc_info[key]['end_lineno']] ) import_lines = [] for child in self.ast.body: if isinstance(child, (ast.Import, ast.ImportFrom)): if isinstance(child, ast.ImportFrom) and child.module == '__future__': # allowed from __future__ imports for future_import in child.names: if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS: self.reporter.error( path=self.object_path, code='illegal-future-imports', msg=('Only the following from __future__ imports are allowed: %s' % ', '.join(self.ACCEPTLIST_FUTURE_IMPORTS)), line=child.lineno ) break else: # for-else. If we didn't find a problem nad break out of the loop, then this is a legal import continue import_lines.append(child.lineno) if child.lineno < min_doc_line: self.reporter.error( path=self.object_path, code='import-before-documentation', msg=('Import found before documentation variables. 
' 'All imports must appear below ' 'DOCUMENTATION/EXAMPLES/RETURN.'), line=child.lineno ) break elif isinstance(child, TRY_EXCEPT): bodies = child.body for handler in child.handlers: bodies.extend(handler.body) for grandchild in bodies: if isinstance(grandchild, (ast.Import, ast.ImportFrom)): import_lines.append(grandchild.lineno) if grandchild.lineno < min_doc_line: self.reporter.error( path=self.object_path, code='import-before-documentation', msg=('Import found before documentation ' 'variables. All imports must appear below ' 'DOCUMENTATION/EXAMPLES/RETURN.'), line=child.lineno ) break for import_line in import_lines: if not (max_doc_line < import_line < first_callable): msg = ( 'import-placement', ('Imports should be directly below DOCUMENTATION/EXAMPLES/' 'RETURN.') ) if self._is_new_module(): self.reporter.error( path=self.object_path, code=msg[0], msg=msg[1], line=import_line ) else: self.reporter.warning( path=self.object_path, code=msg[0], msg=msg[1], line=import_line ) def _validate_ps_replacers(self): # loop all (for/else + error) # get module list for each # check "shape" of each module name module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)' csharp_requires = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*(Ansible\..+)' found_requires = False for req_stmt in re.finditer(module_requires, self.text): found_requires = True # this will bomb on dictionary format - "don't do that" module_list = [x.strip() for x in req_stmt.group(1).split(',')] if len(module_list) > 1: self.reporter.error( path=self.object_path, code='multiple-utils-per-requires', msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0) ) continue module_name = module_list[0] if module_name.lower().endswith('.psm1'): self.reporter.error( path=self.object_path, code='invalid-requires-extension', msg='Module #Requires should not end in .psm1: "%s"' % module_name ) for req_stmt in re.finditer(csharp_requires, 
                                    self.text):
            found_requires = True
            # this will bomb on dictionary format - "don't do that"
            module_list = [x.strip() for x in req_stmt.group(1).split(',')]
            if len(module_list) > 1:
                self.reporter.error(
                    path=self.object_path,
                    code='multiple-csharp-utils-per-requires',
                    msg='Ansible C# util requirements do not support multiple utils per statement: "%s"' % req_stmt.group(0)
                )
                continue

            module_name = module_list[0]

            if module_name.lower().endswith('.cs'):
                self.reporter.error(
                    path=self.object_path,
                    code='illegal-extension-cs',
                    msg='Module #AnsibleRequires -CSharpUtil should not end in .cs: "%s"' % module_name
                )

        # also accept the legacy #POWERSHELL_COMMON replacer signal
        if not found_requires and REPLACER_WINDOWS not in self.text:
            self.reporter.error(
                path=self.object_path,
                code='missing-module-utils-import-csharp-requirements',
                msg='No Ansible.ModuleUtils or C# Ansible util requirements/imports found'
            )

    def _find_ps_docs_py_file(self):
        # For a .ps1 module, the documentation lives in a sibling .py file;
        # report an error if it is missing (unless the module is rejectlisted).
        if self.object_name in self.PS_DOC_REJECTLIST:
            return
        py_path = self.path.replace('.ps1', '.py')
        if not os.path.isfile(py_path):
            self.reporter.error(
                path=self.object_path,
                code='missing-python-doc',
                msg='Missing python documentation file'
            )
        return py_path

    def _get_docs(self):
        # Extract the raw DOCUMENTATION/EXAMPLES/RETURN string assignments from
        # the module AST, recording the value and its start/end line numbers.
        docs = {
            'DOCUMENTATION': {
                'value': None,
                'lineno': 0,
                'end_lineno': 0,
            },
            'EXAMPLES': {
                'value': None,
                'lineno': 0,
                'end_lineno': 0,
            },
            'RETURN': {
                'value': None,
                'lineno': 0,
                'end_lineno': 0,
            },
        }
        for child in self.ast.body:
            if isinstance(child, ast.Assign):
                for grandchild in child.targets:
                    if not isinstance(grandchild, ast.Name):
                        continue

                    if grandchild.id == 'DOCUMENTATION':
                        docs['DOCUMENTATION']['value'] = child.value.s
                        docs['DOCUMENTATION']['lineno'] = child.lineno
                        docs['DOCUMENTATION']['end_lineno'] = (
                            child.lineno + len(child.value.s.splitlines())
                        )
                    elif grandchild.id == 'EXAMPLES':
                        docs['EXAMPLES']['value'] = child.value.s
                        docs['EXAMPLES']['lineno'] = child.lineno
                        docs['EXAMPLES']['end_lineno'] = (
                            child.lineno + len(child.value.s.splitlines())
                        )
                    elif grandchild.id == 'RETURN':
                        docs['RETURN']['value'] = child.value.s
                        docs['RETURN']['lineno'] = child.lineno
                        docs['RETURN']['end_lineno'] = (
                            child.lineno + len(child.value.s.splitlines())
                        )

        return docs

    def _validate_docs_schema(self, doc, schema, name, error_code):
        # Run ``doc`` through a voluptuous-style ``schema`` callable and convert
        # every validation error into a reporter error under ``error_code``
        # (or the error's own ansible_error_code when present).
        # TODO: Add line/col
        errors = []
        try:
            schema(doc)
        except Exception as e:
            for error in e.errors:
                error.data = doc
            errors.extend(e.errors)

        for error in errors:
            path = [str(p) for p in error.path]

            local_error_code = getattr(error, 'ansible_error_code', error_code)

            if isinstance(error.data, dict):
                error_message = humanize_error(error.data, error)
            else:
                error_message = error

            if path:
                combined_path = '%s.%s' % (name, '.'.join(path))
            else:
                combined_path = name

            self.reporter.error(
                path=self.object_path,
                code=local_error_code,
                msg='%s: %s' % (combined_path, error_message)
            )

    def _validate_docs(self):
        # Main documentation validation entry point: parses and schema-checks
        # DOCUMENTATION/EXAMPLES/RETURN and cross-checks deprecation markers.
        doc_info = self._get_docs()
        doc = None
        documentation_exists = False
        examples_exist = False
        returns_exist = False
        # We have three ways of marking deprecated/removed files.  Have to check each one
        # individually and then make sure they all agree
        filename_deprecated_or_removed = False
        deprecated = False
        removed = False
        doc_deprecated = None  # doc legally might not exist
        routing_says_deprecated = False

        if self.object_name.startswith('_') and not os.path.islink(self.object_path):
            filename_deprecated_or_removed = True

        # We are testing a collection
        if self.routing:
            routing_deprecation = self.routing.get('plugin_routing', {}).get('modules', {}).get(self.name, {}).get('deprecation', {})
            if routing_deprecation:
                # meta/runtime.yml says this is deprecated
                routing_says_deprecated = True
                deprecated = True

        if not removed:
            if not bool(doc_info['DOCUMENTATION']['value']):
                self.reporter.error(
                    path=self.object_path,
                    code='missing-documentation',
                    msg='No DOCUMENTATION provided'
                )
            else:
                documentation_exists = True
                doc, errors, traces = parse_yaml(
                    doc_info['DOCUMENTATION']['value'],
                    doc_info['DOCUMENTATION']['lineno'],
                    self.name, 'DOCUMENTATION'
                )
                if doc:
                    add_collection_to_versions_and_dates(doc, self.collection_name, is_module=True)
                for error in errors:
                    self.reporter.error(
                        path=self.object_path,
                        code='documentation-syntax-error',
                        **error
                    )
                for trace in traces:
                    self.reporter.trace(
                        path=self.object_path,
                        tracebk=trace
                    )
                if not errors and not traces:
                    missing_fragment = False
                    with CaptureStd():
                        try:
                            get_docstring(self.path, fragment_loader, verbose=True,
                                          collection_name=self.collection_name, is_module=True)
                        except AssertionError:
                            fragment = doc['extends_documentation_fragment']
                            self.reporter.error(
                                path=self.object_path,
                                code='missing-doc-fragment',
                                msg='DOCUMENTATION fragment missing: %s' % fragment
                            )
                            missing_fragment = True
                        except Exception as e:
                            self.reporter.trace(
                                path=self.object_path,
                                tracebk=traceback.format_exc()
                            )
                            self.reporter.error(
                                path=self.object_path,
                                code='documentation-error',
                                msg='Unknown DOCUMENTATION error, see TRACE: %s' % e
                            )

                    if not missing_fragment:
                        add_fragments(doc, self.object_path, fragment_loader=fragment_loader,
                                      is_module=True)

                if 'options' in doc and doc['options'] is None:
                    self.reporter.error(
                        path=self.object_path,
                        code='invalid-documentation-options',
                        msg='DOCUMENTATION.options must be a dictionary/hash when used',
                    )

                if 'deprecated' in doc and doc.get('deprecated'):
                    doc_deprecated = True
                    doc_deprecation = doc['deprecated']
                    documentation_collection = doc_deprecation.get('removed_from_collection')
                    if documentation_collection != self.collection_name:
                        self.reporter.error(
                            path=self.object_path,
                            code='deprecation-wrong-collection',
                            msg='"DOCUMENTATION.deprecation.removed_from_collection must be the current collection name: %r vs. %r' % (
                                documentation_collection, self.collection_name)
                        )
                else:
                    doc_deprecated = False

                if os.path.islink(self.object_path):
                    # This module has an alias, which we can tell as it's a symlink
                    # Rather than checking for `module: $filename` we need to check against the true filename
                    self._validate_docs_schema(
                        doc,
                        doc_schema(
                            os.readlink(self.object_path).split('.')[0],
                            for_collection=bool(self.collection),
                            deprecated_module=deprecated,
                        ),
                        'DOCUMENTATION',
                        'invalid-documentation',
                    )
                else:
                    # This is the normal case
                    self._validate_docs_schema(
                        doc,
                        doc_schema(
                            self.object_name.split('.')[0],
                            for_collection=bool(self.collection),
                            deprecated_module=deprecated,
                        ),
                        'DOCUMENTATION',
                        'invalid-documentation',
                    )

                if not self.collection:
                    existing_doc = self._check_for_new_args(doc)
                    self._check_version_added(doc, existing_doc)

            if not bool(doc_info['EXAMPLES']['value']):
                self.reporter.error(
                    path=self.object_path,
                    code='missing-examples',
                    msg='No EXAMPLES provided'
                )
            else:
                _doc, errors, traces = parse_yaml(doc_info['EXAMPLES']['value'],
                                                  doc_info['EXAMPLES']['lineno'],
                                                  self.name, 'EXAMPLES', load_all=True)
                for error in errors:
                    self.reporter.error(
                        path=self.object_path,
                        code='invalid-examples',
                        **error
                    )
                for trace in traces:
                    self.reporter.trace(
                        path=self.object_path,
                        tracebk=trace
                    )

            if not bool(doc_info['RETURN']['value']):
                if self._is_new_module():
self.reporter.error( path=self.object_path, code='missing-return', msg='No RETURN provided' ) else: self.reporter.warning( path=self.object_path, code='missing-return-legacy', msg='No RETURN provided' ) else: data, errors, traces = parse_yaml(doc_info['RETURN']['value'], doc_info['RETURN']['lineno'], self.name, 'RETURN') if data: add_collection_to_versions_and_dates(data, self.collection_name, is_module=True, return_docs=True) self._validate_docs_schema(data, return_schema(for_collection=bool(self.collection)), 'RETURN', 'return-syntax-error') for error in errors: self.reporter.error( path=self.object_path, code='return-syntax-error', **error ) for trace in traces: self.reporter.trace( path=self.object_path, tracebk=trace ) # Check for mismatched deprecation if not self.collection: mismatched_deprecation = True if not (filename_deprecated_or_removed or removed or deprecated or doc_deprecated): mismatched_deprecation = False else: if (filename_deprecated_or_removed and deprecated and doc_deprecated): mismatched_deprecation = False if (filename_deprecated_or_removed and removed and not (documentation_exists or examples_exist or returns_exist)): mismatched_deprecation = False if mismatched_deprecation: self.reporter.error( path=self.object_path, code='deprecation-mismatch', msg='Module deprecation/removed must agree in documentaiton, by prepending filename with' ' "_", and setting DOCUMENTATION.deprecated for deprecation or by removing all' ' documentation for removed' ) else: # We are testing a collection if self.object_name.startswith('_'): self.reporter.error( path=self.object_path, code='collections-no-underscore-on-deprecation', msg='Deprecated content in collections MUST NOT start with "_", update meta/runtime.yml instead', ) if not (doc_deprecated == routing_says_deprecated): # DOCUMENTATION.deprecated and meta/runtime.yml disagree self.reporter.error( path=self.object_path, code='deprecation-mismatch', msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do 
not agree.' ) elif routing_says_deprecated: # Both DOCUMENTATION.deprecated and meta/runtime.yml agree that the module is deprecated. # Make sure they give the same version or date. routing_date = routing_deprecation.get('removal_date') routing_version = routing_deprecation.get('removal_version') # The versions and dates in the module documentation are auto-tagged, so remove the tag # to make comparison possible and to avoid confusing the user. documentation_date = doc_deprecation.get('removed_at_date') documentation_version = doc_deprecation.get('removed_in') if not compare_dates(routing_date, documentation_date): self.reporter.error( path=self.object_path, code='deprecation-mismatch', msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal date: %r vs. %r' % ( routing_date, documentation_date) ) if routing_version != documentation_version: self.reporter.error( path=self.object_path, code='deprecation-mismatch', msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal version: %r vs. %r' % ( routing_version, documentation_version) ) # In the future we should error if ANSIBLE_METADATA exists in a collection return doc_info, doc def _check_version_added(self, doc, existing_doc): version_added_raw = doc.get('version_added') try: collection_name = doc.get('version_added_collection') version_added = self._create_strict_version( str(version_added_raw or '0.0'), collection_name=collection_name) except ValueError as e: version_added = version_added_raw or '0.0' if self._is_new_module() or version_added != 'historical': # already reported during schema validation, except: if version_added == 'historical': self.reporter.error( path=self.object_path, code='module-invalid-version-added', msg='version_added is not a valid version number: %r. 
Error: %s' % (version_added, e) ) return if existing_doc and str(version_added_raw) != str(existing_doc.get('version_added')): self.reporter.error( path=self.object_path, code='module-incorrect-version-added', msg='version_added should be %r. Currently %r' % (existing_doc.get('version_added'), version_added_raw) ) if not self._is_new_module(): return should_be = '.'.join(ansible_version.split('.')[:2]) strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin') if (version_added < strict_ansible_version or strict_ansible_version < version_added): self.reporter.error( path=self.object_path, code='module-incorrect-version-added', msg='version_added should be %r. Currently %r' % (should_be, version_added_raw) ) def _validate_ansible_module_call(self, docs): try: spec, args, kwargs = get_argument_spec(self.path, self.collection) except AnsibleModuleNotInitialized: self.reporter.error( path=self.object_path, code='ansible-module-not-initialized', msg="Execution of the module did not result in initialization of AnsibleModule", ) return except AnsibleModuleImportError as e: self.reporter.error( path=self.object_path, code='import-error', msg="Exception attempting to import module for argument_spec introspection, '%s'" % e ) self.reporter.trace( path=self.object_path, tracebk=traceback.format_exc() ) return self._validate_docs_schema(kwargs, ansible_module_kwargs_schema(for_collection=bool(self.collection)), 'AnsibleModule', 'invalid-ansiblemodule-schema') self._validate_argument_spec(docs, spec, kwargs) def _validate_list_of_module_args(self, name, terms, spec, context): if terms is None: return if not isinstance(terms, (list, tuple)): # This is already reported by schema checking return for check in terms: if not isinstance(check, (list, tuple)): # This is already reported by schema checking continue bad_term = False for term in check: if not isinstance(term, string_types): msg = name if context: msg += " found in %s" % " -> 
".join(context) msg += " must contain strings in the lists or tuples; found value %r" % (term, ) self.reporter.error( path=self.object_path, code=name + '-type', msg=msg, ) bad_term = True if bad_term: continue if len(set(check)) != len(check): msg = name if context: msg += " found in %s" % " -> ".join(context) msg += " has repeated terms" self.reporter.error( path=self.object_path, code=name + '-collision', msg=msg, ) if not set(check) <= set(spec): msg = name if context: msg += " found in %s" % " -> ".join(context) msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(check).difference(set(spec)))) self.reporter.error( path=self.object_path, code=name + '-unknown', msg=msg, ) def _validate_required_if(self, terms, spec, context, module): if terms is None: return if not isinstance(terms, (list, tuple)): # This is already reported by schema checking return for check in terms: if not isinstance(check, (list, tuple)) or len(check) not in [3, 4]: # This is already reported by schema checking continue if len(check) == 4 and not isinstance(check[3], bool): msg = "required_if" if context: msg += " found in %s" % " -> ".join(context) msg += " must have forth value omitted or of type bool; got %r" % (check[3], ) self.reporter.error( path=self.object_path, code='required_if-is_one_of-type', msg=msg, ) requirements = check[2] if not isinstance(requirements, (list, tuple)): msg = "required_if" if context: msg += " found in %s" % " -> ".join(context) msg += " must have third value (requirements) being a list or tuple; got type %r" % (requirements, ) self.reporter.error( path=self.object_path, code='required_if-requirements-type', msg=msg, ) continue bad_term = False for term in requirements: if not isinstance(term, string_types): msg = "required_if" if context: msg += " found in %s" % " -> ".join(context) msg += " must have only strings in third value (requirements); got %r" % (term, ) self.reporter.error( path=self.object_path, 
code='required_if-requirements-type', msg=msg, ) bad_term = True if bad_term: continue if len(set(requirements)) != len(requirements): msg = "required_if" if context: msg += " found in %s" % " -> ".join(context) msg += " has repeated terms in requirements" self.reporter.error( path=self.object_path, code='required_if-requirements-collision', msg=msg, ) if not set(requirements) <= set(spec): msg = "required_if" if context: msg += " found in %s" % " -> ".join(context) msg += " contains terms in requirements which are not part of argument_spec: %s" % ", ".join(sorted(set(requirements).difference(set(spec)))) self.reporter.error( path=self.object_path, code='required_if-requirements-unknown', msg=msg, ) key = check[0] if key not in spec: msg = "required_if" if context: msg += " found in %s" % " -> ".join(context) msg += " must have its key %s in argument_spec" % key self.reporter.error( path=self.object_path, code='required_if-unknown-key', msg=msg, ) continue if key in requirements: msg = "required_if" if context: msg += " found in %s" % " -> ".join(context) msg += " contains its key %s in requirements" % key self.reporter.error( path=self.object_path, code='required_if-key-in-requirements', msg=msg, ) value = check[1] if value is not None: _type = spec[key].get('type', 'str') if callable(_type): _type_checker = _type else: _type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type) try: with CaptureStd(): dummy = _type_checker(value) except (Exception, SystemExit): msg = "required_if" if context: msg += " found in %s" % " -> ".join(context) msg += " has value %r which does not fit to %s's parameter type %r" % (value, key, _type) self.reporter.error( path=self.object_path, code='required_if-value-type', msg=msg, ) def _validate_required_by(self, terms, spec, context): if terms is None: return if not isinstance(terms, Mapping): # This is already reported by schema checking return for key, value in terms.items(): if isinstance(value, string_types): value = 
[value] if not isinstance(value, (list, tuple)): # This is already reported by schema checking continue for term in value: if not isinstance(term, string_types): # This is already reported by schema checking continue if len(set(value)) != len(value) or key in value: msg = "required_by" if context: msg += " found in %s" % " -> ".join(context) msg += " has repeated terms" self.reporter.error( path=self.object_path, code='required_by-collision', msg=msg, ) if not set(value) <= set(spec) or key not in spec: msg = "required_by" if context: msg += " found in %s" % " -> ".join(context) msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(value).difference(set(spec)))) self.reporter.error( path=self.object_path, code='required_by-unknown', msg=msg, ) def _validate_argument_spec(self, docs, spec, kwargs, context=None, last_context_spec=None): if not self.analyze_arg_spec: return if docs is None: docs = {} if context is None: context = [] if last_context_spec is None: last_context_spec = kwargs try: if not context: add_fragments(docs, self.object_path, fragment_loader=fragment_loader, is_module=True) except Exception: # Cannot merge fragments return # Use this to access type checkers later module = NoArgsAnsibleModule({}) self._validate_list_of_module_args('mutually_exclusive', last_context_spec.get('mutually_exclusive'), spec, context) self._validate_list_of_module_args('required_together', last_context_spec.get('required_together'), spec, context) self._validate_list_of_module_args('required_one_of', last_context_spec.get('required_one_of'), spec, context) self._validate_required_if(last_context_spec.get('required_if'), spec, context, module) self._validate_required_by(last_context_spec.get('required_by'), spec, context) provider_args = set() args_from_argspec = set() deprecated_args_from_argspec = set() doc_options = docs.get('options', {}) if doc_options is None: doc_options = {} for arg, data in spec.items(): restricted_argument_names 
= ('message', 'syslog_facility') if arg.lower() in restricted_argument_names: msg = "Argument '%s' in argument_spec " % arg if context: msg += " found in %s" % " -> ".join(context) msg += "must not be one of %s as it is used " \ "internally by Ansible Core Engine" % (",".join(restricted_argument_names)) self.reporter.error( path=self.object_path, code='invalid-argument-name', msg=msg, ) continue if 'aliases' in data: for al in data['aliases']: if al.lower() in restricted_argument_names: msg = "Argument alias '%s' in argument_spec " % al if context: msg += " found in %s" % " -> ".join(context) msg += "must not be one of %s as it is used " \ "internally by Ansible Core Engine" % (",".join(restricted_argument_names)) self.reporter.error( path=self.object_path, code='invalid-argument-name', msg=msg, ) continue if not isinstance(data, dict): msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " must be a dictionary/hash when used" self.reporter.error( path=self.object_path, code='invalid-argument-spec', msg=msg, ) continue removed_at_date = data.get('removed_at_date', None) if removed_at_date is not None: try: if parse_isodate(removed_at_date, allow_date=False) < datetime.date.today(): msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " has a removed_at_date '%s' before today" % removed_at_date self.reporter.error( path=self.object_path, code='deprecated-date', msg=msg, ) except ValueError: # This should only happen when removed_at_date is not in ISO format. Since schema # validation already reported this as an error, don't report it a second time. 
pass deprecated_aliases = data.get('deprecated_aliases', None) if deprecated_aliases is not None: for deprecated_alias in deprecated_aliases: if 'name' in deprecated_alias and 'date' in deprecated_alias: try: date = deprecated_alias['date'] if parse_isodate(date, allow_date=False) < datetime.date.today(): msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " has deprecated aliases '%s' with removal date '%s' before today" % ( deprecated_alias['name'], deprecated_alias['date']) self.reporter.error( path=self.object_path, code='deprecated-date', msg=msg, ) except ValueError: # This should only happen when deprecated_alias['date'] is not in ISO format. Since # schema validation already reported this as an error, don't report it a second # time. pass has_version = False if self.collection and self.collection_version is not None: compare_version = self.collection_version version_of_what = "this collection (%s)" % self.collection_version_str code_prefix = 'collection' has_version = True elif not self.collection: compare_version = LOOSE_ANSIBLE_VERSION version_of_what = "Ansible (%s)" % ansible_version code_prefix = 'ansible' has_version = True removed_in_version = data.get('removed_in_version', None) if removed_in_version is not None: try: collection_name = data.get('removed_from_collection') removed_in = self._create_version(str(removed_in_version), collection_name=collection_name) if has_version and collection_name == self.collection_name and compare_version >= removed_in: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " has a deprecated removed_in_version %r," % removed_in_version msg += " i.e. 
the version is less than or equal to the current version of %s" % version_of_what self.reporter.error( path=self.object_path, code=code_prefix + '-deprecated-version', msg=msg, ) except ValueError as e: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " has an invalid removed_in_version number %r: %s" % (removed_in_version, e) self.reporter.error( path=self.object_path, code='invalid-deprecated-version', msg=msg, ) except TypeError: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " has an invalid removed_in_version number %r: " % (removed_in_version, ) msg += " error while comparing to version of %s" % version_of_what self.reporter.error( path=self.object_path, code='invalid-deprecated-version', msg=msg, ) if deprecated_aliases is not None: for deprecated_alias in deprecated_aliases: if 'name' in deprecated_alias and 'version' in deprecated_alias: try: collection_name = deprecated_alias.get('collection_name') version = self._create_version(str(deprecated_alias['version']), collection_name=collection_name) if has_version and collection_name == self.collection_name and compare_version >= version: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " has deprecated aliases '%s' with removal in version %r," % ( deprecated_alias['name'], deprecated_alias['version']) msg += " i.e. 
the version is less than or equal to the current version of %s" % version_of_what self.reporter.error( path=self.object_path, code=code_prefix + '-deprecated-version', msg=msg, ) except ValueError as e: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " has deprecated aliases '%s' with invalid removal version %r: %s" % ( deprecated_alias['name'], deprecated_alias['version'], e) self.reporter.error( path=self.object_path, code='invalid-deprecated-version', msg=msg, ) except TypeError: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " has deprecated aliases '%s' with invalid removal version %r:" % ( deprecated_alias['name'], deprecated_alias['version']) msg += " error while comparing to version of %s" % version_of_what self.reporter.error( path=self.object_path, code='invalid-deprecated-version', msg=msg, ) aliases = data.get('aliases', []) if arg in aliases: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " is specified as its own alias" self.reporter.error( path=self.object_path, code='parameter-alias-self', msg=msg ) if len(aliases) > len(set(aliases)): msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " has at least one alias specified multiple times in aliases" self.reporter.error( path=self.object_path, code='parameter-alias-repeated', msg=msg ) if not context and arg == 'state': bad_states = set(['list', 'info', 'get']) & set(data.get('choices', set())) for bad_state in bad_states: self.reporter.error( path=self.object_path, code='parameter-state-invalid-choice', msg="Argument 'state' includes the value '%s' as a choice" % bad_state) if not data.get('removed_in_version', None) and not data.get('removed_at_date', None): args_from_argspec.add(arg) args_from_argspec.update(aliases) else: deprecated_args_from_argspec.add(arg) 
deprecated_args_from_argspec.update(aliases) if arg == 'provider' and self.object_path.startswith('lib/ansible/modules/network/'): if data.get('options') is not None and not isinstance(data.get('options'), Mapping): self.reporter.error( path=self.object_path, code='invalid-argument-spec-options', msg="Argument 'options' in argument_spec['provider'] must be a dictionary/hash when used", ) elif data.get('options'): # Record provider options from network modules, for later comparison for provider_arg, provider_data in data.get('options', {}).items(): provider_args.add(provider_arg) provider_args.update(provider_data.get('aliases', [])) if data.get('required') and data.get('default', object) != object: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " is marked as required but specifies a default. Arguments with a" \ " default should not be marked as required" self.reporter.error( path=self.object_path, code='no-default-for-required-parameter', msg=msg ) if arg in provider_args: # Provider args are being removed from network module top level # don't validate docs<->arg_spec checks below continue _type = data.get('type', 'str') if callable(_type): _type_checker = _type else: _type_checker = module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_type) _elements = data.get('elements') if (_type == 'list') and not _elements: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " defines type as list but elements is not defined" self.reporter.error( path=self.object_path, code='parameter-list-no-elements', msg=msg ) if _elements: if not callable(_elements): module._CHECK_ARGUMENT_TYPES_DISPATCHER.get(_elements) if _type != 'list': msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " defines elements as %s but it is valid only when value of parameter type is list" % _elements self.reporter.error( 
path=self.object_path, code='parameter-invalid-elements', msg=msg ) arg_default = None if 'default' in data and not is_empty(data['default']): try: with CaptureStd(): arg_default = _type_checker(data['default']) except (Exception, SystemExit): msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " defines default as (%r) but this is incompatible with parameter type %r" % (data['default'], _type) self.reporter.error( path=self.object_path, code='incompatible-default-type', msg=msg ) continue elif data.get('default') is None and _type == 'bool' and 'options' not in data: arg_default = False doc_options_args = [] for alias in sorted(set([arg] + list(aliases))): if alias in doc_options: doc_options_args.append(alias) if len(doc_options_args) == 0: # Undocumented arguments will be handled later (search for undocumented-parameter) doc_options_arg = {} else: doc_options_arg = doc_options[doc_options_args[0]] if len(doc_options_args) > 1: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " with aliases %s is documented multiple times, namely as %s" % ( ", ".join([("'%s'" % alias) for alias in aliases]), ", ".join([("'%s'" % alias) for alias in doc_options_args]) ) self.reporter.error( path=self.object_path, code='parameter-documented-multiple-times', msg=msg ) try: doc_default = None if 'default' in doc_options_arg and not is_empty(doc_options_arg['default']): with CaptureStd(): doc_default = _type_checker(doc_options_arg['default']) elif doc_options_arg.get('default') is None and _type == 'bool' and 'suboptions' not in doc_options_arg: doc_default = False except (Exception, SystemExit): msg = "Argument '%s' in documentation" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " defines default as (%r) but this is incompatible with parameter type %r" % (doc_options_arg.get('default'), _type) self.reporter.error( path=self.object_path, 
code='doc-default-incompatible-type', msg=msg ) continue if arg_default != doc_default: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " defines default as (%r) but documentation defines default as (%r)" % (arg_default, doc_default) self.reporter.error( path=self.object_path, code='doc-default-does-not-match-spec', msg=msg ) doc_type = doc_options_arg.get('type') if 'type' in data and data['type'] is not None: if doc_type is None: if not arg.startswith('_'): # hidden parameter, for example _raw_params msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " defines type as %r but documentation doesn't define type" % (data['type']) self.reporter.error( path=self.object_path, code='parameter-type-not-in-doc', msg=msg ) elif data['type'] != doc_type: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " defines type as %r but documentation defines type as %r" % (data['type'], doc_type) self.reporter.error( path=self.object_path, code='doc-type-does-not-match-spec', msg=msg ) else: if doc_type is None: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " uses default type ('str') but documentation doesn't define type" self.reporter.error( path=self.object_path, code='doc-missing-type', msg=msg ) elif doc_type != 'str': msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " implies type as 'str' but documentation defines as %r" % doc_type self.reporter.error( path=self.object_path, code='implied-parameter-type-mismatch', msg=msg ) doc_choices = [] try: for choice in doc_options_arg.get('choices', []): try: with CaptureStd(): doc_choices.append(_type_checker(choice)) except (Exception, SystemExit): msg = "Argument '%s' in documentation" % arg if context: msg += " found in %s" % " -> 
".join(context) msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type) self.reporter.error( path=self.object_path, code='doc-choices-incompatible-type', msg=msg ) raise StopIteration() except StopIteration: continue arg_choices = [] try: for choice in data.get('choices', []): try: with CaptureStd(): arg_choices.append(_type_checker(choice)) except (Exception, SystemExit): msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type) self.reporter.error( path=self.object_path, code='incompatible-choices', msg=msg ) raise StopIteration() except StopIteration: continue if not compare_unordered_lists(arg_choices, doc_choices): msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " defines choices as (%r) but documentation defines choices as (%r)" % (arg_choices, doc_choices) self.reporter.error( path=self.object_path, code='doc-choices-do-not-match-spec', msg=msg ) doc_required = doc_options_arg.get('required', False) data_required = data.get('required', False) if (doc_required or data_required) and not (doc_required and data_required): msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) if doc_required: msg += " is not required, but is documented as being required" else: msg += " is required, but is not documented as being required" self.reporter.error( path=self.object_path, code='doc-required-mismatch', msg=msg ) doc_elements = doc_options_arg.get('elements', None) doc_type = doc_options_arg.get('type', 'str') data_elements = data.get('elements', None) if (doc_elements and not doc_type == 'list'): msg = "Argument '%s' " % arg if context: msg += " found in %s" % " -> ".join(context) msg += " defines parameter elements as %s but it is valid only when value of parameter type is list" % 
doc_elements self.reporter.error( path=self.object_path, code='doc-elements-invalid', msg=msg ) if (doc_elements or data_elements) and not (doc_elements == data_elements): msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) if data_elements: msg += " specifies elements as %s," % data_elements else: msg += " does not specify elements," if doc_elements: msg += "but elements is documented as being %s" % doc_elements else: msg += "but elements is not documented" self.reporter.error( path=self.object_path, code='doc-elements-mismatch', msg=msg ) spec_suboptions = data.get('options') doc_suboptions = doc_options_arg.get('suboptions', {}) if spec_suboptions: if not doc_suboptions: msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " has sub-options but documentation does not define it" self.reporter.error( path=self.object_path, code='missing-suboption-docs', msg=msg ) self._validate_argument_spec({'options': doc_suboptions}, spec_suboptions, kwargs, context=context + [arg], last_context_spec=data) for arg in args_from_argspec: if not str(arg).isidentifier(): msg = "Argument '%s' in argument_spec" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " is not a valid python identifier" self.reporter.error( path=self.object_path, code='parameter-invalid', msg=msg ) if docs: args_from_docs = set() for arg, data in doc_options.items(): args_from_docs.add(arg) args_from_docs.update(data.get('aliases', [])) args_missing_from_docs = args_from_argspec.difference(args_from_docs) docs_missing_from_args = args_from_docs.difference(args_from_argspec | deprecated_args_from_argspec) for arg in args_missing_from_docs: if arg in provider_args: # Provider args are being removed from network module top level # So they are likely not documented on purpose continue msg = "Argument '%s'" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " is listed 
in the argument_spec, but not documented in the module documentation" self.reporter.error( path=self.object_path, code='undocumented-parameter', msg=msg ) for arg in docs_missing_from_args: msg = "Argument '%s'" % arg if context: msg += " found in %s" % " -> ".join(context) msg += " is listed in DOCUMENTATION.options, but not accepted by the module argument_spec" self.reporter.error( path=self.object_path, code='nonexistent-parameter-documented', msg=msg ) def _check_for_new_args(self, doc): if not self.base_branch or self._is_new_module(): return with CaptureStd(): try: existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring( self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name, is_module=True) existing_options = existing_doc.get('options', {}) or {} except AssertionError: fragment = doc['extends_documentation_fragment'] self.reporter.warning( path=self.object_path, code='missing-existing-doc-fragment', msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment ) return except Exception as e: self.reporter.warning_trace( path=self.object_path, tracebk=e ) self.reporter.warning( path=self.object_path, code='unknown-doc-fragment', msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. 
Submodule refs may need updated') ) return try: mod_collection_name = existing_doc.get('version_added_collection') mod_version_added = self._create_strict_version( str(existing_doc.get('version_added', '0.0')), collection_name=mod_collection_name) except ValueError: mod_collection_name = self.collection_name mod_version_added = self._create_strict_version('0.0') options = doc.get('options', {}) or {} should_be = '.'.join(ansible_version.split('.')[:2]) strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin') for option, details in options.items(): try: names = [option] + details.get('aliases', []) except (TypeError, AttributeError): # Reporting of this syntax error will be handled by schema validation. continue if any(name in existing_options for name in names): # The option already existed. Make sure version_added didn't change. for name in names: existing_collection_name = existing_options.get(name, {}).get('version_added_collection') existing_version = existing_options.get(name, {}).get('version_added') if existing_version: break current_collection_name = details.get('version_added_collection') current_version = details.get('version_added') if current_collection_name != existing_collection_name: self.reporter.error( path=self.object_path, code='option-incorrect-version-added-collection', msg=('version_added for existing option (%s) should ' 'belong to collection %r. Currently belongs to %r' % (option, current_collection_name, existing_collection_name)) ) elif str(current_version) != str(existing_version): self.reporter.error( path=self.object_path, code='option-incorrect-version-added', msg=('version_added for existing option (%s) should ' 'be %r. 
Currently %r' % (option, existing_version, current_version)) ) continue try: collection_name = details.get('version_added_collection') version_added = self._create_strict_version( str(details.get('version_added', '0.0')), collection_name=collection_name) except ValueError as e: # already reported during schema validation continue if collection_name != self.collection_name: continue if (strict_ansible_version != mod_version_added and (version_added < strict_ansible_version or strict_ansible_version < version_added)): self.reporter.error( path=self.object_path, code='option-incorrect-version-added', msg=('version_added for new option (%s) should ' 'be %r. Currently %r' % (option, should_be, version_added)) ) return existing_doc @staticmethod def is_blacklisted(path): base_name = os.path.basename(path) file_name = os.path.splitext(base_name)[0] if file_name.startswith('_') and os.path.islink(path): return True if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.REJECTLIST): return True for pat in ModuleValidator.REJECTLIST_PATTERNS: if fnmatch(base_name, pat): return True return False def validate(self): super(ModuleValidator, self).validate() if not self._python_module() and not self._powershell_module(): self.reporter.error( path=self.object_path, code='invalid-extension', msg=('Official Ansible modules must have a .py ' 'extension for python modules or a .ps1 ' 'for powershell modules') ) self._python_module_override = True if self._python_module() and self.ast is None: self.reporter.error( path=self.object_path, code='python-syntax-error', msg='Python SyntaxError while parsing module' ) try: compile(self.text, self.path, 'exec') except Exception: self.reporter.trace( path=self.object_path, tracebk=traceback.format_exc() ) return end_of_deprecation_should_be_removed_only = False if self._python_module(): doc_info, docs = self._validate_docs() # See if current version => deprecated.removed_in, ie, should be docs only if docs and 
docs.get('deprecated', False): if 'removed_in' in docs['deprecated']: removed_in = None collection_name = docs['deprecated'].get('removed_from_collection') version = docs['deprecated']['removed_in'] if collection_name != self.collection_name: self.reporter.error( path=self.object_path, code='invalid-module-deprecation-source', msg=('The deprecation version for a module must be added in this collection') ) else: try: removed_in = self._create_strict_version(str(version), collection_name=collection_name) except ValueError as e: self.reporter.error( path=self.object_path, code='invalid-module-deprecation-version', msg=('The deprecation version %r cannot be parsed: %s' % (version, e)) ) if removed_in: if not self.collection: strict_ansible_version = self._create_strict_version( '.'.join(ansible_version.split('.')[:2]), self.collection_name) end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in elif self.collection_version: strict_ansible_version = self.collection_version end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in # handle deprecation by date if 'removed_at_date' in docs['deprecated']: try: removed_at_date = docs['deprecated']['removed_at_date'] if parse_isodate(removed_at_date, allow_date=True) < datetime.date.today(): msg = "Module's deprecated.removed_at_date date '%s' is before today" % removed_at_date self.reporter.error(path=self.object_path, code='deprecated-date', msg=msg) except ValueError: # This happens if the date cannot be parsed. This is already checked by the schema. 
pass if self._python_module() and not self._just_docs() and not end_of_deprecation_should_be_removed_only: self._validate_ansible_module_call(docs) self._check_for_sys_exit() self._find_blacklist_imports() main = self._find_main_call() self._find_module_utils(main) self._find_has_import() first_callable = self._get_first_callable() self._ensure_imports_below_docs(doc_info, first_callable) self._check_for_subprocess() self._check_for_os_call() if self._powershell_module(): if self.basename in self.PS_DOC_REJECTLIST: return self._validate_ps_replacers() docs_path = self._find_ps_docs_py_file() # We can only validate PowerShell arg spec if it is using the new Ansible.Basic.AnsibleModule util pattern = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*Ansible\.Basic' if re.search(pattern, self.text) and self.object_name not in self.PS_ARG_VALIDATE_REJECTLIST: with ModuleValidator(docs_path, base_branch=self.base_branch, git_cache=self.git_cache) as docs_mv: docs = docs_mv._validate_docs()[1] self._validate_ansible_module_call(docs) self._check_gpl3_header() if not self._just_docs() and not end_of_deprecation_should_be_removed_only: self._check_interpreter(powershell=self._powershell_module()) self._check_type_instead_of_isinstance( powershell=self._powershell_module() ) if end_of_deprecation_should_be_removed_only: # Ensure that `if __name__ == '__main__':` calls `removed_module()` which ensure that the module has no code in main = self._find_main_call('removed_module') # FIXME: Ensure that the version in the call to removed_module is less than +2. 
# Otherwise it's time to remove the file (This may need to be done in another test to # avoid breaking whenever the Ansible version bumps) class PythonPackageValidator(Validator): REJECTLIST_FILES = frozenset(('__pycache__',)) def __init__(self, path, reporter=None): super(PythonPackageValidator, self).__init__(reporter=reporter or Reporter()) self.path = path self.basename = os.path.basename(path) @property def object_name(self): return self.basename @property def object_path(self): return self.path def validate(self): super(PythonPackageValidator, self).validate() if self.basename in self.REJECTLIST_FILES: return init_file = os.path.join(self.path, '__init__.py') if not os.path.exists(init_file): self.reporter.error( path=self.object_path, code='subdirectory-missing-init', msg='Ansible module subdirectories must contain an __init__.py' ) def setup_collection_loader(): collections_paths = os.environ.get('ANSIBLE_COLLECTIONS_PATH', '').split(os.pathsep) _AnsibleCollectionFinder(collections_paths) def re_compile(value): """ Argparse expects things to raise TypeError, re.compile raises an re.error exception This function is a shorthand to convert the re.error exception to a TypeError """ try: return re.compile(value) except re.error as e: raise TypeError(e) def run(): parser = argparse.ArgumentParser(prog="validate-modules") parser.add_argument('modules', nargs='+', help='Path to module or module directory') parser.add_argument('-w', '--warnings', help='Show warnings', action='store_true') parser.add_argument('--exclude', help='RegEx exclusion pattern', type=re_compile) parser.add_argument('--arg-spec', help='Analyze module argument spec', action='store_true', default=False) parser.add_argument('--base-branch', default=None, help='Used in determining if new options were added') parser.add_argument('--format', choices=['json', 'plain'], default='plain', help='Output format. 
Default: "%(default)s"') parser.add_argument('--output', default='-', help='Output location, use "-" for stdout. ' 'Default "%(default)s"') parser.add_argument('--collection', help='Specifies the path to the collection, when ' 'validating files within a collection. Ensure ' 'that ANSIBLE_COLLECTIONS_PATH is set so the ' 'contents of the collection can be located') parser.add_argument('--collection-version', help='The collection\'s version number used to check ' 'deprecations') args = parser.parse_args() args.modules = [m.rstrip('/') for m in args.modules] reporter = Reporter() git_cache = GitCache(args.base_branch) check_dirs = set() routing = None if args.collection: setup_collection_loader() routing_file = 'meta/runtime.yml' # Load meta/runtime.yml if it exists, as it may contain deprecation information if os.path.isfile(routing_file): try: with open(routing_file) as f: routing = yaml.safe_load(f) except yaml.error.MarkedYAMLError as ex: print('%s:%d:%d: YAML load failed: %s' % (routing_file, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex)))) except Exception as ex: # pylint: disable=broad-except print('%s:%d:%d: YAML load failed: %s' % (routing_file, 0, 0, re.sub(r'\s+', ' ', str(ex)))) for module in args.modules: if os.path.isfile(module): path = module if args.exclude and args.exclude.search(path): continue if ModuleValidator.is_blacklisted(path): continue with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version, analyze_arg_spec=args.arg_spec, base_branch=args.base_branch, git_cache=git_cache, reporter=reporter, routing=routing) as mv1: mv1.validate() check_dirs.add(os.path.dirname(path)) for root, dirs, files in os.walk(module): basedir = root[len(module) + 1:].split('/', 1)[0] if basedir in REJECTLIST_DIRS: continue for dirname in dirs: if root == module and dirname in REJECTLIST_DIRS: continue path = os.path.join(root, dirname) if args.exclude and args.exclude.search(path): continue 
check_dirs.add(path) for filename in files: path = os.path.join(root, filename) if args.exclude and args.exclude.search(path): continue if ModuleValidator.is_blacklisted(path): continue with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version, analyze_arg_spec=args.arg_spec, base_branch=args.base_branch, git_cache=git_cache, reporter=reporter, routing=routing) as mv2: mv2.validate() if not args.collection: for path in sorted(check_dirs): pv = PythonPackageValidator(path, reporter=reporter) pv.validate() if args.format == 'plain': sys.exit(reporter.plain(warnings=args.warnings, output=args.output)) else: sys.exit(reporter.json(warnings=args.warnings, output=args.output)) class GitCache: def __init__(self, base_branch): self.base_branch = base_branch if self.base_branch: self.base_tree = self._git(['ls-tree', '-r', '--name-only', self.base_branch, 'lib/ansible/modules/']) else: self.base_tree = [] try: self.head_tree = self._git(['ls-tree', '-r', '--name-only', 'HEAD', 'lib/ansible/modules/']) except GitError as ex: if ex.status == 128: # fallback when there is no .git directory self.head_tree = self._get_module_files() else: raise except OSError as ex: if ex.errno == errno.ENOENT: # fallback when git is not installed self.head_tree = self._get_module_files() else: raise self.base_module_paths = dict((os.path.basename(p), p) for p in self.base_tree if os.path.splitext(p)[1] in ('.py', '.ps1')) self.base_module_paths.pop('__init__.py', None) self.head_aliased_modules = set() for path in self.head_tree: filename = os.path.basename(path) if filename.startswith('_') and filename != '__init__.py': if os.path.islink(path): self.head_aliased_modules.add(os.path.basename(os.path.realpath(path))) @staticmethod def _get_module_files(): module_files = [] for (dir_path, dir_names, file_names) in os.walk('lib/ansible/modules/'): for file_name in file_names: module_files.append(os.path.join(dir_path, file_name)) return module_files 
@staticmethod def _git(args): cmd = ['git'] + args p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: raise GitError(stderr, p.returncode) return stdout.decode('utf-8').splitlines() class GitError(Exception): def __init__(self, message, status): super(GitError, self).__init__(message) self.status = status def main(): try: run() except KeyboardInterrupt: pass
jomauricio/abgthe
refs/heads/master
abgthe/apps/profiles/migrations/0003_auto_20150505_1813.py
2
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('profiles', '0002_auto_20150421_1529'), ] operations = [ migrations.AlterField( model_name='profile', name='birthday', field=models.DateField(max_length=10, null=True, verbose_name=b'Nascimento', blank=True), preserve_default=True, ), migrations.AlterField( model_name='profile', name='gender', field=models.CharField(blank=True, max_length=20, verbose_name=b'Sexo', choices=[(b'masculino', 'Masculino'), (b'feminino', 'Feminino')]), preserve_default=True, ), ]
mlalic/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/html5lib/html5lib/treewalkers/__init__.py
1229
"""A collection of modules for iterating through different kinds of tree, generating tokens identical to those produced by the tokenizer module. To create a tree walker for a new type of tree, you need to do implement a tree walker object (called TreeWalker by convention) that implements a 'serialize' method taking a tree as sole argument and returning an iterator generating tokens. """ from __future__ import absolute_import, division, unicode_literals import sys from ..utils import default_etree treeWalkerCache = {} def getTreeWalker(treeType, implementation=None, **kwargs): """Get a TreeWalker class for various types of tree with built-in support treeType - the name of the tree type required (case-insensitive). Supported values are: "dom" - The xml.dom.minidom DOM implementation "pulldom" - The xml.dom.pulldom event stream "etree" - A generic walker for tree implementations exposing an elementtree-like interface (known to work with ElementTree, cElementTree and lxml.etree). "lxml" - Optimized walker for lxml.etree "genshi" - a Genshi stream implementation - (Currently applies to the "etree" tree type only). A module implementing the tree type e.g. xml.etree.ElementTree or cElementTree.""" treeType = treeType.lower() if treeType not in treeWalkerCache: if treeType in ("dom", "pulldom"): name = "%s.%s" % (__name__, treeType) __import__(name) mod = sys.modules[name] treeWalkerCache[treeType] = mod.TreeWalker elif treeType == "genshi": from . import genshistream treeWalkerCache[treeType] = genshistream.TreeWalker elif treeType == "lxml": from . import lxmletree treeWalkerCache[treeType] = lxmletree.TreeWalker elif treeType == "etree": from . import etree if implementation is None: implementation = default_etree # XXX: NEVER cache here, caching is done in the etree submodule return etree.getETreeModule(implementation, **kwargs).TreeWalker return treeWalkerCache.get(treeType)
rodrigosetti/recruitr
refs/heads/master
candidates/views.py
1
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from django.utils.translation import ugettext as _

from .forms import UserForm, CandidateForm
from .models import Candidate


@login_required
def update_candidate(request):
    """Display and process the combined user/candidate profile form.

    GET renders the bound forms; POST validates and saves them, then
    redirects to 'account-profile' on success.
    """
    # Ensure the Candidate row exists before binding forms to it.  The
    # original code only did this on GET, so a user whose first request
    # was a POST would hit a missing ``request.user.candidate``.
    candidate, _created = Candidate.objects.get_or_create(user=request.user)

    if request.method == 'POST':
        # NOTE: a stray debug ``print(request.FILES)`` was removed here.
        user_form = UserForm(request.POST, instance=request.user)
        candidate_form = CandidateForm(request.POST, request.FILES,
                                       instance=candidate)
        if user_form.is_valid() and candidate_form.is_valid():
            user_form.save()
            candidate_form.save()
            messages.success(request, _('Your profile was successfully updated!'))
            return redirect('account-profile')
        # Fall through and re-render the bound forms with their errors.
        messages.error(request, _('Please correct the error below.'))
    else:
        user_form = UserForm(instance=request.user)
        candidate_form = CandidateForm(instance=candidate)

    return render(request, 'profile.html', {
        'user_form': user_form,
        'candidate_form': candidate_form
    })
liang42hao/bokeh
refs/heads/master
examples/plotting/server/brewer.py
42
# The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot

from collections import OrderedDict

import numpy as np
import pandas as pd

from bokeh.plotting import figure, show, output_server
from bokeh.palettes import brewer

# Build a demo frame: one 'x' column plus ten random series y0..y9.
N = 20
categories = ['y' + str(x) for x in range(10)]

data = {}
data['x'] = np.arange(N)
for cat in categories:
    data[cat] = np.random.randint(10, 100, size=N)

df = pd.DataFrame(data)
df = df.set_index(['x'])


def stacked(df, categories):
    """Return stacked-area polygon y-coordinates for each category.

    For each category the polygon outline is the previous cumulative top
    edge (reversed) concatenated with the new cumulative top edge, which
    is the closed shape ``patches`` expects.
    """
    areas = OrderedDict()
    last = np.zeros(len(df[categories[0]]))
    for cat in categories:
        # 'nxt' (not 'next') avoids shadowing the builtin next().
        nxt = last + df[cat]
        areas[cat] = np.hstack((last[::-1], nxt))
        last = nxt
    return areas


areas = stacked(df, categories)

colors = brewer["Spectral"][len(areas)]

# Mirror the x values so each patch outline runs right-to-left along the
# bottom edge and left-to-right along the top edge.
x2 = np.hstack((data['x'][::-1], data['x']))

output_server("brewer")

p = figure()
p.patches([x2 for a in areas], list(areas.values()), color=colors, alpha=0.8, line_color=None)

show(p)
cslzchen/osf.io
refs/heads/develop
addons/s3/tests/factories.py
32
# -*- coding: utf-8 -*-
"""Factories for the S3 addon."""
import factory
from factory.django import DjangoModelFactory

from osf_tests.factories import UserFactory, ProjectFactory, ExternalAccountFactory

from addons.s3.models import (
    UserSettings,
    NodeSettings
)


class S3AccountFactory(ExternalAccountFactory):
    """External-account stub for the 's3' provider with sequential credentials."""

    provider = 's3'
    provider_id = factory.Sequence(lambda idx: 'id-{0}'.format(idx))
    oauth_key = factory.Sequence(lambda idx: 'key-{0}'.format(idx))
    oauth_secret = factory.Sequence(lambda idx: 'secret-{0}'.format(idx))
    display_name = 'S3 Fake User'


class S3UserSettingsFactory(DjangoModelFactory):
    """User-level S3 settings owned by a freshly created user."""

    class Meta:
        model = UserSettings

    owner = factory.SubFactory(UserFactory)


class S3NodeSettingsFactory(DjangoModelFactory):
    """Node-level S3 settings bound to a project and its user settings."""

    class Meta:
        model = NodeSettings

    owner = factory.SubFactory(ProjectFactory)
    user_settings = factory.SubFactory(S3UserSettingsFactory)
patrickwestphal/owlapy
refs/heads/master
tests/vocab/owl2datatype_tests.py
1
import unittest from owlapy.vocab.owl2datatype import OWL2Datatype from owlapy.vocab.owl2datatype import Category from owlapy.model import IRI from owlapy.model import EntityType from owlapy.model import OWLDatatype from owlapy.model import OWLDataVisitor, OWLDataVisitorEx from owlapy.model import OWLRuntimeException from owlapy.model import DataRangeType class TestOWL2Datatype(unittest.TestCase): def test___init__(self): self.assertEqual(34, len(OWL2Datatype)) # all attributes tested in two examples self.assertEqual(IRI('http://www.w3.org/2002/07/owl#real'), OWL2Datatype.OWL_REAL.iri) self.assertEqual('owl:real', OWL2Datatype.OWL_REAL.prefixed_name) self.assertEqual('real', OWL2Datatype.OWL_REAL.short_form) self.assertEqual(Category.CAT_NUMBER, OWL2Datatype.OWL_REAL.category) self.assertFalse(OWL2Datatype.OWL_REAL.finite) self.assertEqual('.*', OWL2Datatype.OWL_REAL.pattern_string) self.assertIsNotNone(OWL2Datatype.OWL_REAL.pattern) self.assertEqual(IRI('http://www.w3.org/2001/XMLSchema#double'), OWL2Datatype.XSD_DOUBLE.iri) self.assertEqual('xsd:double', OWL2Datatype.XSD_DOUBLE.prefixed_name) self.assertEqual('double', OWL2Datatype.XSD_DOUBLE.short_form) self.assertEqual(Category.CAT_NUMBER, OWL2Datatype.XSD_DOUBLE.category) self.assertTrue(OWL2Datatype.XSD_DOUBLE.finite) self.assertEqual( '\\A(\\+|-)?(([0-9]+(\\.[0-9]*)?|\\.[0-9]+)(([Ee])((\\+|-)?' 
'([0-9]+)))?|((\\+|-)?INF|NaN))\\Z', OWL2Datatype.XSD_DOUBLE.pattern_string) self.assertIsNotNone(OWL2Datatype.XSD_DOUBLE.pattern) # all examples tested on one attribute self.assertEqual('XMLLiteral', OWL2Datatype.RDF_XML_LITERAL.short_form) self.assertEqual('Literal', OWL2Datatype.RDFS_LITERAL.short_form) # 2 self.assertEqual('PlainLiteral', OWL2Datatype.RDF_PLAIN_LITERAL.short_form) # 3 self.assertEqual('real', OWL2Datatype.OWL_REAL.short_form) # 4 self.assertEqual('rational', OWL2Datatype.OWL_RATIONAL.short_form) # 5 self.assertEqual('string', OWL2Datatype.XSD_STRING.short_form) # 6 self.assertEqual('normalizedString', OWL2Datatype.XSD_NORMALIZED_STRING.short_form) # 7 self.assertEqual('token', OWL2Datatype.XSD_TOKEN.short_form) # 8 self.assertEqual('language', OWL2Datatype.XSD_LANGUAGE.short_form) # 9 self.assertEqual('Name', OWL2Datatype.XSD_NAME.short_form) # 10 self.assertEqual('NCName', OWL2Datatype.XSD_NCNAME.short_form) # 11 self.assertEqual('NMTOKEN', OWL2Datatype.XSD_NMTOKEN.short_form) # 12 self.assertEqual('decimal', OWL2Datatype.XSD_DECIMAL.short_form) # 13 self.assertEqual('integer', OWL2Datatype.XSD_INTEGER.short_form) # 14 self.assertEqual('nonNegativeInteger', OWL2Datatype.XSD_NON_NEGATIVE_INTEGER.short_form) # 15 self.assertEqual('nonPositiveInteger', OWL2Datatype.XSD_NON_POSITIVE_INTEGER.short_form) # 16 self.assertEqual('positiveInteger', OWL2Datatype.XSD_POSITIVE_INTEGER.short_form) # 17 self.assertEqual('negativeInteger', OWL2Datatype.XSD_NEGATIVE_INTEGER.short_form) # 18 self.assertEqual('long', OWL2Datatype.XSD_LONG.short_form) # 19 self.assertEqual('int', OWL2Datatype.XSD_INT.short_form) # 20 self.assertEqual('short', OWL2Datatype.XSD_SHORT.short_form) # 21 self.assertEqual('byte', OWL2Datatype.XSD_BYTE.short_form) # 22 self.assertEqual('unsignedLong', OWL2Datatype.XSD_UNSIGNED_LONG.short_form) # 23 self.assertEqual('unsignedInt', OWL2Datatype.XSD_UNSIGNED_INT.short_form) # 24 self.assertEqual('unsignedShort', 
OWL2Datatype.XSD_UNSIGNED_SHORT.short_form) # 25 self.assertEqual('unsignedByte', OWL2Datatype.XSD_UNSIGNED_BYTE.short_form) # 26 self.assertEqual('double', OWL2Datatype.XSD_DOUBLE.short_form) # 27 self.assertEqual('float', OWL2Datatype.XSD_FLOAT.short_form) # 28 self.assertEqual('boolean', OWL2Datatype.XSD_BOOLEAN.short_form) # 29 self.assertEqual('hexBinary', OWL2Datatype.XSD_HEX_BINARY.short_form) self.assertEqual('base64Binary', OWL2Datatype.XSD_BASE_64_BINARY.short_form) # 31 self.assertEqual('anyURI', OWL2Datatype.XSD_ANY_URI.short_form) # 32 self.assertEqual('dateTime', OWL2Datatype.XSD_DATE_TIME.short_form) self.assertEqual('dateTimeStamp', OWL2Datatype.XSD_DATE_TIME_STAMP.short_form) # 34 def test_all_iris(self): self.assertIn( IRI('http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral'), OWL2Datatype.ALL_IRIS) # 1 self.assertIn( IRI('http://www.w3.org/2000/01/rdf-schema#Literal'), OWL2Datatype.ALL_IRIS) # 2 self.assertIn( IRI('http://www.w3.org/1999/02/22-rdf-syntax-ns#PlainLiteral'), OWL2Datatype.ALL_IRIS) # 3 self.assertIn( IRI('http://www.w3.org/2002/07/owl#real'), OWL2Datatype.ALL_IRIS) self.assertIn( IRI('http://www.w3.org/2002/07/owl#rational'), OWL2Datatype.ALL_IRIS) # 5 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#string'), OWL2Datatype.ALL_IRIS) # 6 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#normalizedString'), OWL2Datatype.ALL_IRIS) # 7 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#token'), OWL2Datatype.ALL_IRIS) # 8 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#language'), OWL2Datatype.ALL_IRIS) # 9 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#Name'), OWL2Datatype.ALL_IRIS) # 10 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#NCName'), OWL2Datatype.ALL_IRIS) # 11 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#NMTOKEN'), OWL2Datatype.ALL_IRIS) # 12 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#decimal'), OWL2Datatype.ALL_IRIS) # 13 self.assertIn( 
IRI('http://www.w3.org/2001/XMLSchema#integer'), OWL2Datatype.ALL_IRIS) # 14 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#nonNegativeInteger'), OWL2Datatype.ALL_IRIS) # 15 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#nonPositiveInteger'), OWL2Datatype.ALL_IRIS) # 16 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#positiveInteger'), OWL2Datatype.ALL_IRIS) # 17 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#negativeInteger'), OWL2Datatype.ALL_IRIS) # 18 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#long'), OWL2Datatype.ALL_IRIS) self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#int'), OWL2Datatype.ALL_IRIS) self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#short'), OWL2Datatype.ALL_IRIS) # 21 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#byte'), OWL2Datatype.ALL_IRIS) self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#unsignedLong'), OWL2Datatype.ALL_IRIS) # 23 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#unsignedInt'), OWL2Datatype.ALL_IRIS) # 24 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#unsignedShort'), OWL2Datatype.ALL_IRIS) # 25 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#unsignedByte'), OWL2Datatype.ALL_IRIS) # 26 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#double'), OWL2Datatype.ALL_IRIS) # 27 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#float'), OWL2Datatype.ALL_IRIS) # 28 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#boolean'), OWL2Datatype.ALL_IRIS) # 29 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#hexBinary'), OWL2Datatype.ALL_IRIS) # 30 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#base64Binary'), OWL2Datatype.ALL_IRIS) # 31 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#anyURI'), OWL2Datatype.ALL_IRIS) # 32 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#dateTime'), OWL2Datatype.ALL_IRIS) # 33 self.assertIn( IRI('http://www.w3.org/2001/XMLSchema#dateTimeStamp'), OWL2Datatype.ALL_IRIS) # 34 def test_is_built_in(self): 
built_in_dtype_01 = IRI('http://www.w3.org/2001/XMLSchema#unsignedByte') built_in_dtype_02 = IRI('http://www.w3.org/2001/XMLSchema#integer') not_built_in_dtype = IRI('http://ex.org/dtype/integer') not_a_dtype_iri = 23 self.assertTrue(OWL2Datatype.is_built_in(built_in_dtype_01)) self.assertTrue(OWL2Datatype.is_built_in(built_in_dtype_02)) self.assertFalse(OWL2Datatype.is_built_in(not_built_in_dtype)) self.assertFalse(OWL2Datatype.is_built_in(not_a_dtype_iri)) def test__get_datatype(self): built_in_dtype_01 = IRI('http://www.w3.org/2001/XMLSchema#unsignedByte') built_in_dtype_02 = IRI('http://www.w3.org/2001/XMLSchema#integer') not_built_in_dtype = IRI('http://ex.org/dtype/integer') not_a_dtype_iri = 23 self.assertEqual( OWL2Datatype.XSD_UNSIGNED_BYTE, OWL2Datatype.get_datatype(built_in_dtype_01)) self.assertEqual( OWL2Datatype.XSD_INTEGER, OWL2Datatype.get_datatype(built_in_dtype_02)) self.assertRaises(OWLRuntimeException, OWL2Datatype.get_datatype, not_built_in_dtype) self.assertRaises(OWLRuntimeException, OWL2Datatype.get_datatype, not_a_dtype_iri) def test_is_numeric(self): self.assertTrue(OWL2Datatype.OWL_REAL.is_numeric()) self.assertTrue(OWL2Datatype.XSD_INTEGER.is_numeric()) self.assertTrue(OWL2Datatype.XSD_NEGATIVE_INTEGER.is_numeric()) self.assertFalse(OWL2Datatype.XSD_STRING.is_numeric()) self.assertFalse(OWL2Datatype.RDF_PLAIN_LITERAL.is_numeric()) self.assertFalse(OWL2Datatype.XSD_DATE_TIME.is_numeric()) def test_get_facets(self): self.assertEqual( Category.CAT_NUMBER.facets, OWL2Datatype.XSD_INTEGER.get_facets()) def test_get_datatype(self): self.fail('OWLDataFactory needs to be implemented first') def test_is_in_lexical_space_rdf_xmlliteral(self): # 01 lex01 = 'test01' lex02 = '<test<lala>blah/>' lex03 = '23' lex04 = '' self.assertTrue(OWL2Datatype.RDF_XML_LITERAL.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.RDF_XML_LITERAL.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.RDF_XML_LITERAL.is_in_lexical_space(lex03)) 
self.assertTrue(OWL2Datatype.RDF_XML_LITERAL.is_in_lexical_space(lex04)) def test_is_in_lexical_space_rdf_rdfs_literal(self): # 02 lex01 = 'test01' lex02 = '<test<lala>blah/>' lex03 = '23' lex04 = '' self.assertTrue(OWL2Datatype.RDFS_LITERAL.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.RDFS_LITERAL.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.RDFS_LITERAL.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.RDFS_LITERAL.is_in_lexical_space(lex04)) def test_is_in_lexical_space_rdf_plain_literal(self): # 03 lx1 = 'test01' lx2 = '<test<lala>blah/>' lx3 = '23' lx4 = '' self.assertTrue(OWL2Datatype.RDF_PLAIN_LITERAL.is_in_lexical_space(lx1)) self.assertTrue(OWL2Datatype.RDF_PLAIN_LITERAL.is_in_lexical_space(lx2)) self.assertTrue(OWL2Datatype.RDF_PLAIN_LITERAL.is_in_lexical_space(lx3)) self.assertTrue(OWL2Datatype.RDF_PLAIN_LITERAL.is_in_lexical_space(lx4)) def test_is_in_lexical_space_owl_real(self): # 04 """ http://www.w3.org/TR/owl2-syntax/#Real_Numbers.2C_Decimal_Numbers.2C_and_Integers --> 'The owl:real datatype does not directly provide any lexical forms.' 
""" lex01 = 'test01' lex02 = '<test<lala>blah/>' lex03 = '23' lex04 = '' self.assertTrue(OWL2Datatype.OWL_REAL.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.OWL_REAL.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.OWL_REAL.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.OWL_REAL.is_in_lexical_space(lex04)) def test_is_in_lexical_space_owl_rational(self): # 05 """ http://www.w3.org/TR/owl2-syntax/#Real_Numbers.2C_Decimal_Numbers.2C_and_Integers -->`The owl:rational datatype supports lexical forms defined by the following grammar (whitespace within the grammar MUST be ignored and MUST NOT be included in the lexical forms of owl:rational, and single quotes are used to introduce terminal symbols): numerator '/' denominator` """ # pos examples taken from https://www.w3.org/2007/OWL/wiki/OWL_Rational lex01 = '-1/3' lex02 = '41/7' lex03 = '0/19' lex04 = '4/2' lex05 = '-6/1' lex06 = '0' # fails with OWLAPI regex lex07 = '2' # fails with OWLAPI regex lex08 = '-6' # fails with OWLAPI regex # own deviations lex09 = '41 /7' lex10 = '41/ 7' lex11 = '41 / 7' lex12 = '41 / 7' lex13 = '2 ' # fails with OWLAPI regex # neg examples lex14 = 'test01' lex15 = '41//7' lex16 = '41/7/3' lex17 = '' lex18 = '/' self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex04)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex05)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex06)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex07)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex08)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex09)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex10)) 
self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex11)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex12)) self.assertTrue(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex13)) self.assertFalse(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex14)) self.assertFalse(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex15)) self.assertFalse(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex16)) self.assertFalse(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex17)) self.assertFalse(OWL2Datatype.OWL_RATIONAL.is_in_lexical_space(lex18)) def test_is_in_lexical_space_xsd_string(self): # 06 lex01 = 'test01' lex02 = '<test<lala> blah/>' lex03 = '23' lex04 = '' self.assertTrue(OWL2Datatype.OWL_REAL.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.OWL_REAL.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.OWL_REAL.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.OWL_REAL.is_in_lexical_space(lex04)) def test_is_in_lexical_space_xsd_norm_string(self): # 07 lex01 = 'test01' lex02 = '<test<lala>blah/>' lex03 = '23' lex04 = '' lex05 = 'test 01' lex06 = '<test\r<lala>blah/>' lex07 = '2\t3' lex08 = '\n' self.assertTrue( OWL2Datatype.XSD_NORMALIZED_STRING.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_NORMALIZED_STRING.is_in_lexical_space(lex02)) self.assertTrue( OWL2Datatype.XSD_NORMALIZED_STRING.is_in_lexical_space(lex03)) self.assertTrue( OWL2Datatype.XSD_NORMALIZED_STRING.is_in_lexical_space(lex04)) self.assertTrue( OWL2Datatype.XSD_NORMALIZED_STRING.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_NORMALIZED_STRING.is_in_lexical_space(lex06)) self.assertFalse( OWL2Datatype.XSD_NORMALIZED_STRING.is_in_lexical_space(lex07)) self.assertFalse( OWL2Datatype.XSD_NORMALIZED_STRING.is_in_lexical_space(lex08)) def test_is_in_lexical_space_xsd_token(self): # 08 lex01 = 'test01' lex02 = 'test 0 1' lex03 = '23' lex04 = '<test<lala>blah/>' lex05 = '<test <lala>blah/>' lex06 = ' test 0 1' lex07 = 'test 0 1 ' lex08 
= ' test 0 1 ' lex09 = ' test\t0 1 ' lex10 = ' test 0 1 ' lex11 = ' test\n0 1 ' self.assertTrue(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex04)) self.assertTrue(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex05)) self.assertFalse(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex06)) self.assertFalse(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex07)) self.assertFalse(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex08)) self.assertFalse(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex09)) self.assertFalse(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex10)) self.assertFalse(OWL2Datatype.XSD_TOKEN.is_in_lexical_space(lex11)) def test_is_in_lexical_space_xsd_language(self): # 09 lex01 = 'eng' lex02 = 'engengeng' # longer than 8 chars --> fail lex03 = '' # less than 1 char --> fail lex04 = 'sächs' # umlaut --> fail lex05 = 'eng-foo-bar-baz' lex06 = 'eng-foo1-bar2-baz3' lex07 = 'eng-lalalalala' # 2nd part longer than 8 chars --> fail lex08 = 'eng1-foo2-bar3-baz4' # number in first part --> fail lex09 = 'eng1-foo' # number in first part --> fail lex10 = '23' # fail self.assertTrue(OWL2Datatype.XSD_LANGUAGE.is_in_lexical_space(lex01)) self.assertFalse(OWL2Datatype.XSD_LANGUAGE.is_in_lexical_space(lex02)) self.assertFalse(OWL2Datatype.XSD_LANGUAGE.is_in_lexical_space(lex03)) self.assertFalse(OWL2Datatype.XSD_LANGUAGE.is_in_lexical_space(lex04)) self.assertTrue(OWL2Datatype.XSD_LANGUAGE.is_in_lexical_space(lex05)) self.assertTrue(OWL2Datatype.XSD_LANGUAGE.is_in_lexical_space(lex06)) self.assertFalse(OWL2Datatype.XSD_LANGUAGE.is_in_lexical_space(lex07)) self.assertFalse(OWL2Datatype.XSD_LANGUAGE.is_in_lexical_space(lex08)) self.assertFalse(OWL2Datatype.XSD_LANGUAGE.is_in_lexical_space(lex09)) self.assertFalse(OWL2Datatype.XSD_LANGUAGE.is_in_lexical_space(lex10)) def 
test_is_in_lexical_space_xsd_name(self): # 10 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_Name.html lex01 = 'myElement' lex02 = '_my.Element' lex03 = 'my-element' lex04 = 'pre:myelement3' lex05 = '-myelement' lex06 = '3rdElement' lex07 = '' self.assertTrue(OWL2Datatype.XSD_NAME.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_NAME.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_NAME.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_NAME.is_in_lexical_space(lex04)) self.assertFalse(OWL2Datatype.XSD_NAME.is_in_lexical_space(lex05)) self.assertFalse(OWL2Datatype.XSD_NAME.is_in_lexical_space(lex06)) self.assertFalse(OWL2Datatype.XSD_NAME.is_in_lexical_space(lex07)) def test_is_in_lexical_space_xsd_ncname(self): # 11 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_NCName.html lex01 = 'myElement' lex02 = '_my.Element' # only worked after altering orig. OWLAPI regex lex03 = 'my-element' # only worked after altering orig. 
OWLAPI regex lex04 = 'pre:myElement' lex05 = '-myelement' lex06 = '' self.assertTrue(OWL2Datatype.XSD_NCNAME.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_NCNAME.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_NCNAME.is_in_lexical_space(lex03)) self.assertFalse(OWL2Datatype.XSD_NCNAME.is_in_lexical_space(lex04)) self.assertFalse(OWL2Datatype.XSD_NCNAME.is_in_lexical_space(lex05)) self.assertFalse(OWL2Datatype.XSD_NCNAME.is_in_lexical_space(lex06)) def test_is_in_lexical_space_nmtoken(self): # 12 lex01 = 'test01' lex02 = '<test<lala> blah/>' lex03 = '23' lex04 = '' self.assertTrue(OWL2Datatype.XSD_NMTOKEN.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_NMTOKEN.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_NMTOKEN.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_NMTOKEN.is_in_lexical_space(lex04)) def test_is_in_lexical_space_xsd_decimal(self): # 13 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_decimal.html lex01 = '3.0' lex02 = '-3.0' lex03 = '+3.5' lex04 = '3' lex05 = '.3' lex06 = '3.' 
lex07 = '0' lex08 = '-.3' lex09 = '0003.0' lex10 = '3.0000' lex11 = '3,5' # fail lex12 = '' # fail self.assertTrue(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex04)) self.assertTrue(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex05)) self.assertTrue(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex06)) self.assertTrue(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex07)) self.assertTrue(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex08)) self.assertTrue(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex09)) self.assertTrue(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex10)) self.assertFalse(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex11)) self.assertFalse(OWL2Datatype.XSD_DECIMAL.is_in_lexical_space(lex12)) def test_is_in_lexical_space_xsd_integer(self): # 14 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_integer.html lex01 = '122' lex02 = '00122' lex03 = '0' lex04 = '-3' lex05 = '+3' lex06 = '3.' 
# fail lex07 = '3.0' # fail lex08 = '' # fail self.assertTrue(OWL2Datatype.XSD_INTEGER.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_INTEGER.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_INTEGER.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_INTEGER.is_in_lexical_space(lex04)) self.assertTrue(OWL2Datatype.XSD_INTEGER.is_in_lexical_space(lex05)) self.assertFalse(OWL2Datatype.XSD_INTEGER.is_in_lexical_space(lex06)) self.assertFalse(OWL2Datatype.XSD_INTEGER.is_in_lexical_space(lex07)) self.assertFalse(OWL2Datatype.XSD_INTEGER.is_in_lexical_space(lex08)) def test_is_in_lexical_space_xsd_non_negative_integer(self): # 15 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_nonNegativeInteger.html lex01 = '+3' lex02 = '122' lex03 = '0' lex04 = '00122' lex05 = '-3' # negative --> fail lex06 = '3.0' # decimal point --> fail lex07 = '' self.assertTrue( OWL2Datatype.XSD_NON_NEGATIVE_INTEGER.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_NON_NEGATIVE_INTEGER.is_in_lexical_space(lex02)) self.assertTrue( OWL2Datatype.XSD_NON_NEGATIVE_INTEGER.is_in_lexical_space(lex03)) self.assertTrue( OWL2Datatype.XSD_NON_NEGATIVE_INTEGER.is_in_lexical_space(lex04)) self.assertFalse( OWL2Datatype.XSD_NON_NEGATIVE_INTEGER.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_NON_NEGATIVE_INTEGER.is_in_lexical_space(lex06)) self.assertFalse( OWL2Datatype.XSD_NON_NEGATIVE_INTEGER.is_in_lexical_space(lex07)) def test_is_in_lexical_space_xsd_non_positive_integer(self): # 16 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_nonPositiveInteger.html lex01 = '-3' lex02 = '0' lex03 = '-00122' lex04 = '122' # fail lex05 = '+3' # fail lex06 = '3.0' # fail lex07 = '' # fail self.assertTrue( OWL2Datatype.XSD_NON_POSITIVE_INTEGER.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_NON_POSITIVE_INTEGER.is_in_lexical_space(lex02)) self.assertTrue( 
OWL2Datatype.XSD_NON_POSITIVE_INTEGER.is_in_lexical_space(lex03)) self.assertFalse( OWL2Datatype.XSD_NON_POSITIVE_INTEGER.is_in_lexical_space(lex04)) self.assertFalse( OWL2Datatype.XSD_NON_POSITIVE_INTEGER.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_NON_POSITIVE_INTEGER.is_in_lexical_space(lex06)) self.assertFalse( OWL2Datatype.XSD_NON_POSITIVE_INTEGER.is_in_lexical_space(lex07)) def test_is_in_lexical_space_xsd_positive_integer(self): # 17 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_positiveInteger.html lex01 = '122' lex02 = '+3' lex03 = '00122' lex04 = '0' # fail lex05 = '-3' # fail lex06 = '3.0' # fail lex07 = '' # fail self.assertTrue( OWL2Datatype.XSD_POSITIVE_INTEGER.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_POSITIVE_INTEGER.is_in_lexical_space(lex02)) self.assertTrue( OWL2Datatype.XSD_POSITIVE_INTEGER.is_in_lexical_space(lex03)) self.assertFalse( OWL2Datatype.XSD_POSITIVE_INTEGER.is_in_lexical_space(lex04)) self.assertFalse( OWL2Datatype.XSD_POSITIVE_INTEGER.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_POSITIVE_INTEGER.is_in_lexical_space(lex06)) self.assertFalse( OWL2Datatype.XSD_POSITIVE_INTEGER.is_in_lexical_space(lex07)) def test_is_in_lexical_space_xsd_negative_integer(self): # 18 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_negativeInteger.html lex01 = '-3' lex02 = '-00122' lex03 = '0' # fail lex04 = '122' # fail lex05 = '+3' # fail lex06 = '3.0' # fail lex07 = '' self.assertTrue( OWL2Datatype.XSD_NEGATIVE_INTEGER.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_NEGATIVE_INTEGER.is_in_lexical_space(lex02)) self.assertFalse( OWL2Datatype.XSD_NEGATIVE_INTEGER.is_in_lexical_space(lex03)) self.assertFalse( OWL2Datatype.XSD_NEGATIVE_INTEGER.is_in_lexical_space(lex04)) self.assertFalse( OWL2Datatype.XSD_NEGATIVE_INTEGER.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_NEGATIVE_INTEGER.is_in_lexical_space(lex06)) self.assertFalse( 
OWL2Datatype.XSD_NEGATIVE_INTEGER.is_in_lexical_space(lex07)) def test_is_in_lexical_space_xsd_long(self): # 19 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_long.html lex01 = '+3' lex02 = '122' lex03 = '0' lex04 = '-1231235555' # not checked in OWLAPI (maybe not possible with regex at all) lex05 = '9223372036854775810' # too large --> fail lex06 = '3.0' # decimal point --> fail lex07 = '' self.assertTrue(OWL2Datatype.XSD_LONG.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_LONG.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_LONG.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_LONG.is_in_lexical_space(lex04)) # self.assertFalse(OWL2Datatype.XSD_LONG.is_in_lexical_space(lex05)) self.assertFalse(OWL2Datatype.XSD_LONG.is_in_lexical_space(lex06)) self.assertFalse(OWL2Datatype.XSD_LONG.is_in_lexical_space(lex07)) def test_is_in_lexical_space_xsd_int(self): # 20 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_int.html lex01 = '+3' lex02 = '122' lex03 = '0' lex04 = '-12312' lex05 = '3.0' # decimal point --> fail lex06 = '' # fail self.assertTrue(OWL2Datatype.XSD_INT.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_INT.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_INT.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_INT.is_in_lexical_space(lex04)) self.assertFalse(OWL2Datatype.XSD_INT.is_in_lexical_space(lex05)) self.assertFalse(OWL2Datatype.XSD_INT.is_in_lexical_space(lex06)) def test_is_in_lexical_space_xsd_short(self): # 21 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_short.html lex01 = '+3' lex02 = '122' lex03 = '0' lex04 = '-1231' lex05 = '3.0' # fail lex06 = '' # fail self.assertTrue(OWL2Datatype.XSD_SHORT.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_SHORT.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_SHORT.is_in_lexical_space(lex03)) 
self.assertTrue(OWL2Datatype.XSD_SHORT.is_in_lexical_space(lex04)) self.assertFalse(OWL2Datatype.XSD_SHORT.is_in_lexical_space(lex05)) self.assertFalse(OWL2Datatype.XSD_SHORT.is_in_lexical_space(lex06)) def test_is_in_lexical_space_xsd_byte(self): # 22 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_byte.html lex01 = '+3' lex02 = '122' lex03 = '0' lex04 = '-123' lex05 = '3.0' # fail lex06 = '' # fail self.assertTrue(OWL2Datatype.XSD_BYTE.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_BYTE.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_BYTE.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_BYTE.is_in_lexical_space(lex04)) self.assertFalse(OWL2Datatype.XSD_BYTE.is_in_lexical_space(lex05)) self.assertFalse(OWL2Datatype.XSD_BYTE.is_in_lexical_space(lex06)) def test_is_in_lexical_space_xsd_unsigned_long(self): # 23 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_unsignedLong.html lex01 = '+3' lex02 = '122' lex03 = '0' lex04 = '-123' # fail lex05 = '3.0' # fail lex06 = '' self.assertTrue( OWL2Datatype.XSD_UNSIGNED_LONG.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_UNSIGNED_LONG.is_in_lexical_space(lex02)) self.assertTrue( OWL2Datatype.XSD_UNSIGNED_LONG.is_in_lexical_space(lex03)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_LONG.is_in_lexical_space(lex04)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_LONG.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_LONG.is_in_lexical_space(lex06)) def test_is_in_lexical_space_xsd_unsigned_int(self): # 24 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_unsignedInt.html lex01 = '+3' lex02 = '122' lex03 = '0' lex04 = '-123' # fail lex05 = '3.0' # fail lex06 = '' # fail self.assertTrue( OWL2Datatype.XSD_UNSIGNED_INT.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_UNSIGNED_INT.is_in_lexical_space(lex02)) self.assertTrue( OWL2Datatype.XSD_UNSIGNED_INT.is_in_lexical_space(lex03)) 
self.assertFalse( OWL2Datatype.XSD_UNSIGNED_INT.is_in_lexical_space(lex04)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_INT.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_INT.is_in_lexical_space(lex06)) def test_is_in_lexical_space_xsd_unsigned_short(self): # 25 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_unsignedShort.html lex01 = '+3' lex02 = '122' lex03 = '0' lex04 = '-123' # fail lex05 = '3.0' # fail lex06 = '' # fail self.assertTrue( OWL2Datatype.XSD_UNSIGNED_SHORT.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_UNSIGNED_SHORT.is_in_lexical_space(lex02)) self.assertTrue( OWL2Datatype.XSD_UNSIGNED_SHORT.is_in_lexical_space(lex03)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_SHORT.is_in_lexical_space(lex04)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_SHORT.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_SHORT.is_in_lexical_space(lex06)) def test_is_in_lexical_space_xsd_unsigned_byte(self): # 26 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_unsignedByte.html lex01 = '+3' lex02 = '122' lex03 = '0' lex04 = '-123' # fail lex05 = '3.0' # fail lex06 = '' # fail self.assertTrue( OWL2Datatype.XSD_UNSIGNED_BYTE.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_UNSIGNED_BYTE.is_in_lexical_space(lex02)) self.assertTrue( OWL2Datatype.XSD_UNSIGNED_BYTE.is_in_lexical_space(lex03)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_BYTE.is_in_lexical_space(lex04)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_BYTE.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_UNSIGNED_BYTE.is_in_lexical_space(lex06)) def test_is_in_lexical_space_xsd_double(self): # 27 # pos/neg exampes taken from # http://www.datypic.com/sc/xsd/t-xsd_double.html lex01 = '-3E2' lex02 = '4268.22752E11' lex03 = '+24.3e-3' lex04 = '12' lex05 = '+3.5' lex06 = '-INF' lex07 = '-0' lex08 = 'NaN' lex09 = '-3E2.4' # the exponent must be an integer --> fail lex10 = '12E' # fail lex11 = 'NAN' # 
wrong capitalization --> fail lex12 = '' self.assertTrue(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex04)) self.assertTrue(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex05)) self.assertTrue(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex06)) self.assertTrue(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex07)) self.assertTrue(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex08)) self.assertFalse(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex09)) self.assertFalse(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex10)) self.assertFalse(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex11)) self.assertFalse(OWL2Datatype.XSD_DOUBLE.is_in_lexical_space(lex12)) def test_is_in_lexical_space_xsd_float(self): # 28 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_float.html lex01 = '-3E2' lex02 = '4268.22752E11' lex03 = '+24.3e-3' lex04 = '12' lex05 = '+3.5' lex06 = '-INF' lex07 = '-0' lex08 = 'NaN' lex09 = '-3E2.4' # the exponent must be an integer --> fail lex10 = '12E' # fail lex11 = 'NAN' # wrong capitalization --> fail lex12 = '' self.assertTrue(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex04)) self.assertTrue(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex05)) self.assertTrue(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex06)) self.assertTrue(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex07)) self.assertTrue(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex08)) self.assertFalse(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex09)) self.assertFalse(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex10)) self.assertFalse(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex11)) 
self.assertFalse(OWL2Datatype.XSD_FLOAT.is_in_lexical_space(lex12)) def test_is_in_lexical_space_xsd_boolean(self): # 29 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_boolean.html lex01 = 'true' lex02 = 'false' lex03 = '0' lex04 = '1' lex05 = 'TRUE' # wrong capitalization --> fail lex06 = 'T' # fail lex07 = '' self.assertTrue(OWL2Datatype.XSD_BOOLEAN.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_BOOLEAN.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_BOOLEAN.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_BOOLEAN.is_in_lexical_space(lex04)) self.assertFalse(OWL2Datatype.XSD_BOOLEAN.is_in_lexical_space(lex05)) self.assertFalse(OWL2Datatype.XSD_BOOLEAN.is_in_lexical_space(lex06)) self.assertFalse(OWL2Datatype.XSD_BOOLEAN.is_in_lexical_space(lex07)) def test_is_in_lexical_space_xsd_hex_binary(self): # 30 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_hexBinary.html lex01 = '0FB8' lex02 = '0fb8' lex03 = '' lex04 = 'FB8' # odd number of characters --> fail self.assertTrue(OWL2Datatype.XSD_HEX_BINARY.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_HEX_BINARY.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_HEX_BINARY.is_in_lexical_space(lex03)) self.assertFalse(OWL2Datatype.XSD_HEX_BINARY.is_in_lexical_space(lex04)) def test_is_in_lexical_space_xsd_base64_binary(self): # 31 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_base64Binary.html lex01 = '0FB8' lex02 = '0fb8' lex03 = '0 FB8 0F+9' lex04 = '0F+40A==' lex05 = '' lex06 = 'FB8' # odd number of characters --> fail lex07 = '==0F' # equals signs may only appear at the end --> fail self.assertTrue( OWL2Datatype.XSD_BASE_64_BINARY.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_BASE_64_BINARY.is_in_lexical_space(lex02)) self.assertTrue( OWL2Datatype.XSD_BASE_64_BINARY.is_in_lexical_space(lex03)) self.assertTrue( OWL2Datatype.XSD_BASE_64_BINARY.is_in_lexical_space(lex04)) 
self.assertTrue( OWL2Datatype.XSD_BASE_64_BINARY.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_BASE_64_BINARY.is_in_lexical_space(lex06)) self.assertFalse( OWL2Datatype.XSD_BASE_64_BINARY.is_in_lexical_space(lex07)) def test_is_in_lexical_space_xsd_any_uri(self): # 32 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_anyURI.html lex01 = 'http://datypic.com' lex02 = 'mailto:info@datypic.com' lex03 = '../%C3%A9dition.html' lex04 = '../édition.html' lex05 = 'http://datypic.com/prod.html#shirt' lex06 = '../prod.html#shirt' lex07 = 'urn:example:org' lex08 = '' # OWLAPI uses wildcard regex and hence the negative examples do not work lex09 = 'http://datypic.com#frag1#frag2' # two #'s --> fail # % character followed by something other than two hexadecimal digits lex10 = 'http://datypic.com#f% rag' # --> fail self.assertTrue(OWL2Datatype.XSD_ANY_URI.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_ANY_URI.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_ANY_URI.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_ANY_URI.is_in_lexical_space(lex04)) self.assertTrue(OWL2Datatype.XSD_ANY_URI.is_in_lexical_space(lex05)) self.assertTrue(OWL2Datatype.XSD_ANY_URI.is_in_lexical_space(lex06)) self.assertTrue(OWL2Datatype.XSD_ANY_URI.is_in_lexical_space(lex07)) self.assertTrue(OWL2Datatype.XSD_ANY_URI.is_in_lexical_space(lex08)) # self.assertFalse(OWL2Datatype.XSD_ANY_URI.is_in_lexical_space(lex09)) # self.assertFalse(OWL2Datatype.XSD_ANY_URI.is_in_lexical_space(lex10)) def test_is_in_lexical_space_xsd_date_time(self): # 33 # pos/neg examples taken from # http://www.datypic.com/sc/xsd/t-xsd_dateTime.html lex01 = '2004-04-12T13:20:00' lex02 = '2004-04-12T13:20:15.5' lex03 = '2004-04-12T13:20:00-05:00' lex04 = '2004-04-12T13:20:00Z' lex05 = '2004-04-12T13:00' # no seconds specified --> fail lex06 = '2004-04-1213:20:00' # letter T missing --> fail lex07 = '99-04-12T13:00' # truncated century --> fail lex08 = '2004-04-12' 
# time missing --> fail lex09 = '' self.assertTrue(OWL2Datatype.XSD_DATE_TIME.is_in_lexical_space(lex01)) self.assertTrue(OWL2Datatype.XSD_DATE_TIME.is_in_lexical_space(lex02)) self.assertTrue(OWL2Datatype.XSD_DATE_TIME.is_in_lexical_space(lex03)) self.assertTrue(OWL2Datatype.XSD_DATE_TIME.is_in_lexical_space(lex04)) self.assertFalse(OWL2Datatype.XSD_DATE_TIME.is_in_lexical_space(lex05)) self.assertFalse(OWL2Datatype.XSD_DATE_TIME.is_in_lexical_space(lex06)) self.assertFalse(OWL2Datatype.XSD_DATE_TIME.is_in_lexical_space(lex07)) self.assertFalse(OWL2Datatype.XSD_DATE_TIME.is_in_lexical_space(lex08)) self.assertFalse(OWL2Datatype.XSD_DATE_TIME.is_in_lexical_space(lex09)) def test_is_in_lexical_space_xsd_date_time_stamp(self): # 34 # pos/neg examples taken from # http://www.datypic.com/sc/xsd11/t-xsd_dateTimeStamp.html lex01 = '2004-04-12T13:20:00-05:00' lex02 = '2004-04-12T13:20:00Z' lex03 = '2004-04-12T13:20:00' # time zone missing --> fail lex04 = '2004-04-12T13:00Z' # seconds missing --> fail lex05 = '2004-04-12Z' # time missing lex06 = '' self.assertTrue( OWL2Datatype.XSD_DATE_TIME_STAMP.is_in_lexical_space(lex01)) self.assertTrue( OWL2Datatype.XSD_DATE_TIME_STAMP.is_in_lexical_space(lex02)) self.assertFalse( OWL2Datatype.XSD_DATE_TIME_STAMP.is_in_lexical_space(lex03)) self.assertFalse( OWL2Datatype.XSD_DATE_TIME_STAMP.is_in_lexical_space(lex04)) self.assertFalse( OWL2Datatype.XSD_DATE_TIME_STAMP.is_in_lexical_space(lex05)) self.assertFalse( OWL2Datatype.XSD_DATE_TIME_STAMP.is_in_lexical_space(lex06)) # ------------------- original tests from OWLAPI ------------------- def test_should_return_correct_prefix_name_for_xmlliteral(self): prefixed_name = OWL2Datatype.RDF_XML_LITERAL.prefixed_name self.assertEqual('rdf:XMLLiteral', prefixed_name) def test_should_return_correct_prefix_name_for_literal(self): prefixed_name = OWL2Datatype.RDFS_LITERAL.prefixed_name self.assertEqual('rdfs:Literal', prefixed_name) def 
test_should_return_correct_prefix_name_for_plain_literal(self): prefixed_name = OWL2Datatype.RDF_PLAIN_LITERAL.prefixed_name self.assertEqual('rdf:PlainLiteral', prefixed_name) def test_should_return_correct_prefix_name_for_real(self): prefixed_name = OWL2Datatype.OWL_REAL.prefixed_name self.assertEqual('owl:real', prefixed_name) def test_should_return_correct_prefix_name_for_rational(self): prefixed_name = OWL2Datatype.OWL_RATIONAL.prefixed_name self.assertEqual('owl:rational', prefixed_name) def test_should_return_correct_prefix_name_for_string(self): prefixed_name = OWL2Datatype.XSD_STRING.prefixed_name self.assertEqual('xsd:string', prefixed_name) def test_should_return_correct_prefix_name_for_normalizedString(self): prefixed_name = OWL2Datatype.XSD_NORMALIZED_STRING.prefixed_name self.assertEqual('xsd:normalizedString', prefixed_name) def test_should_return_correct_prefix_name_for_token(self): prefixed_name = OWL2Datatype.XSD_TOKEN.prefixed_name self.assertEqual('xsd:token', prefixed_name) def test_should_return_correct_prefix_name_for_language(self): prefixed_name = OWL2Datatype.XSD_LANGUAGE.prefixed_name self.assertEqual('xsd:language', prefixed_name) def test_should_return_correct_prefix_name_for_name(self): prefixed_name = OWL2Datatype.XSD_NAME.prefixed_name self.assertEqual('xsd:Name', prefixed_name) def test_should_return_correct_prefix_name_for_ncname(self): prefixed_name = OWL2Datatype.XSD_NCNAME.prefixed_name self.assertEqual('xsd:NCName', prefixed_name) def test_should_return_correct_prefix_name_for_nmtoken(self): prefixed_name = OWL2Datatype.XSD_NMTOKEN.prefixed_name self.assertEqual('xsd:NMTOKEN', prefixed_name) def test_should_return_correct_prefix_name_for_decimal(self): prefixed_name = OWL2Datatype.XSD_DECIMAL.prefixed_name self.assertEqual('xsd:decimal', prefixed_name) def test_should_return_correct_prefix_name_for_integer(self): prefixed_name = OWL2Datatype.XSD_INTEGER.prefixed_name self.assertEqual('xsd:integer', prefixed_name) def 
test_should_return_correct_prefix_name_for_non_negative_integer(self): prefixed_name = OWL2Datatype.XSD_NON_NEGATIVE_INTEGER.prefixed_name self.assertEqual('xsd:nonNegativeInteger', prefixed_name) def test_should_return_correct_prefix_name_for_non_positive_integer(self): prefixed_name = OWL2Datatype.XSD_NON_POSITIVE_INTEGER.prefixed_name self.assertEqual('xsd:nonPositiveInteger', prefixed_name) def test_should_return_correct_prefix_name_for_positive_integer(self): prefixed_name = OWL2Datatype.XSD_POSITIVE_INTEGER.prefixed_name self.assertEqual('xsd:positiveInteger', prefixed_name) def test_should_return_correct_prefix_name_for_negative_integer(self): prefixed_name = OWL2Datatype.XSD_NEGATIVE_INTEGER.prefixed_name self.assertEqual('xsd:negativeInteger', prefixed_name) def test_should_return_correct_prefix_name_for_long(self): prefixed_name = OWL2Datatype.XSD_LONG.prefixed_name self.assertEqual('xsd:long', prefixed_name) def test_should_return_correct_prefix_name_for_int(self): prefixed_name = OWL2Datatype.XSD_INT.prefixed_name self.assertEqual('xsd:int', prefixed_name) def test_should_return_correct_prefix_name_for_short(self): prefixed_name = OWL2Datatype.XSD_SHORT.prefixed_name self.assertEqual('xsd:short', prefixed_name) def test_should_return_correct_prefix_name_for_byte(self): prefixed_name = OWL2Datatype.XSD_BYTE.prefixed_name self.assertEqual('xsd:byte', prefixed_name) def test_should_return_correct_prefix_name_for_unsigned_long(self): prefixed_name = OWL2Datatype.XSD_UNSIGNED_LONG.prefixed_name self.assertEqual('xsd:unsignedLong', prefixed_name) def test_should_return_correct_prefix_name_for_unsigned_int(self): prefixed_name = OWL2Datatype.XSD_UNSIGNED_INT.prefixed_name self.assertEqual('xsd:unsignedInt', prefixed_name) def test_should_return_correct_prefix_name_for_unsigned_short(self): prefixed_name = OWL2Datatype.XSD_UNSIGNED_SHORT.prefixed_name self.assertEqual('xsd:unsignedShort', prefixed_name) def 
test_should_return_correct_prefix_name_for_unsigned_byte(self): prefixed_name = OWL2Datatype.XSD_UNSIGNED_BYTE.prefixed_name self.assertEqual('xsd:unsignedByte', prefixed_name) def test_should_return_correct_prefix_name_for_double(self): prefixed_name = OWL2Datatype.XSD_DOUBLE.prefixed_name self.assertEqual('xsd:double', prefixed_name) def test_should_return_correct_prefix_name_for_float(self): prefixed_name = OWL2Datatype.XSD_FLOAT.prefixed_name self.assertEqual('xsd:float', prefixed_name) def test_should_return_correct_prefix_name_for_boolean(self): prefixed_name = OWL2Datatype.XSD_BOOLEAN.prefixed_name self.assertEqual('xsd:boolean', prefixed_name) def test_should_return_correct_prefix_name_for_hex_binary(self): prefixed_name = OWL2Datatype.XSD_HEX_BINARY.prefixed_name self.assertEqual('xsd:hexBinary', prefixed_name) def test_should_return_correct_prefix_name_for_base64_binary(self): prefixed_name = OWL2Datatype.XSD_BASE_64_BINARY.prefixed_name self.assertEqual('xsd:base64Binary', prefixed_name) def test_should_return_correct_prefix_name_for_any_uri(self): prefixed_name = OWL2Datatype.XSD_ANY_URI.prefixed_name self.assertEqual('xsd:anyURI', prefixed_name) def test_should_return_correct_prefix_name_for_date_time(self): prefixed_name = OWL2Datatype.XSD_DATE_TIME.prefixed_name self.assertEqual('xsd:dateTime', prefixed_name) def test_should_return_correct_prefix_name_for_date_timestamp(self): prefixed_name = OWL2Datatype.XSD_DATE_TIME_STAMP.prefixed_name self.assertEqual('xsd:dateTimeStamp', prefixed_name)
CompPhysics/ThesisProjects
refs/heads/master
doc/MSc/msc_students/former/AudunHansen/Audun/Pythonscripts/matrix_transforms.py
1
from numpy import * class transforms(): def __init__(self, Np, Nh): self.Np = Np self.Nh = Nh self.Tab = zeros((Np**2), dtype = int) for b in range(Np): self.Tab[range(b*Np,(b+1)*Np)] = range(b,Np**2, Np) self.Tai = zeros((Np*Nh), dtype = int) for a in range(Nh): self.Tai[range(a*Np,(a+1)*Np)] = range(b,Np*Nh, Nh) self.Tij = zeros((Nh**2), dtype = int) for i in range(Nh): self.Tij[range(i*Nh,(i+1)*Nh)] = range(i,Nh**2,Nh) def abij2baji(self,M): return M[self.Tab][:, self.Tij] def abij2baji_(self,M): Z = zeros((Np*Np,Nh*Nh)) for i in range(Nh): for j in range(Nh): for a in range(Np): for b in range(Np): Z[a + b*Np, i + j * Nh] = M[b + a*Np, j + i*Nh] return Z def abij2aibj_(self,M): Z = zeros((Nh*Np,Nh*Np)) Z2 = zeros((Nh*Np,Nh*Np)) for i in range(self.Nh): for j in range(self.Nh): for a in range(self.Np): for b in range(self.Np): #TL3[a + i*Np, b + j*Np] = T[a + b*Np, i + j*Nh] Z[a + i*self.Np, b + j*self.Np] = M[a + b*Np, i + j*Nh] return Z def abij2aibj(self,M): Z = zeros((Nh*Np,Nh*Np)) print M[self.Tai, :] #return M[self.Tai][:, self.Tai] class transformer(): def __init__(self, Np, Nh): self.Np = Np self.Nh = Nh self.init_index_reorganization() def init_index_reorganization(self): Np = self.Np Nh = self.Nh self.Pai_bjA = zeros((Np*Nh,Nh*Np), dtype = int) self.Pai_bjB = zeros((Np*Nh,Nh*Np), dtype = int) self.L3_A = zeros((Np**2,Nh**2), dtype = int) self.L3_B = zeros((Np**2,Nh**2), dtype = int) self.Pbj_aiA = zeros((Np*Nh,Nh*Np), dtype = int) self.Pbj_aiB = zeros((Np*Nh,Nh*Np), dtype = int) self.Pi_jabA = zeros((Nh,Nh*Np**2), dtype = int) self.Pi_jabB = zeros((Nh,Nh*Np**2), dtype = int) self.Pa_bjiA = zeros((Np,Np*Nh**2), dtype = int) self.Pa_bjiB = zeros((Np,Np*Nh**2), dtype = int) self.Pb_aijA = zeros((Np,Np*Nh**2), dtype = int) self.Pb_aijB = zeros((Np,Np*Nh**2), dtype = int) self.Q2_A = zeros((Np**2, Nh**2), dtype = int) self.Q2_B = zeros((Np**2, Nh**2), dtype = int) self.Q3_A = zeros((Np**2, Nh**2), dtype = int) self.Q3_B = zeros((Np**2, Nh**2), dtype = int) 
self.Q4_A = zeros((Np**2, Nh**2), dtype = int) self.Q4_B = zeros((Np**2, Nh**2), dtype = int) for i in range(Nh): for j in range(Nh): for a in range(Np): for b in range(Np): self.Pai_bjA[a + i*Np, b + j*Np] = a + b*Np self.Pai_bjB[a + i*Np, b + j*Np] = i + j*Nh self.L3_A[a + b*Np, i + j*Nh] = a + i*Np self.L3_B[a + b*Np, i + j*Nh] = b + j*Np self.Pbj_aiA[b + j*Np, a + i*Np] = a + b*Np self.Pbj_aiB[b + j*Np, a + i*Np] = i + j*Nh self.Pi_jabA[i,j+ a*Nh + b *Np*Nh] = a + b*Np #TQ31&TQ32 self.Pi_jabB[i,j+ a*Nh + b *Np*Nh] = i + j*Nh self.Pa_bjiA[a,b+ j*Np + i *Nh*Np] = a + b*Np self.Pa_bjiB[a,b+ j*Np + i *Nh*Np] = j + i*Nh self.Pb_aijA[b, a + i*Np + j *Nh*Np] = a + b*Np self.Pb_aijB[b, a + i*Np + j *Nh*Np] = i + j*Nh self.Q2_A[a + b*Np, i + j*Nh] = a + i*Np self.Q2_B[a + b*Np, i + j*Nh] = b + j*Np self.Q3_A[a + b*Np, i + j*Nh] = i self.Q3_B[a + b*Np, i + j*Nh] = j + a*Nh + b*Nh*Np self.Q4_A[a + b*Np, i + j*Nh] = a self.Q4_B[a + b*Np, i + j*Nh] = b + i*Np + j*Np*Nh #Q3[a + b*Np, i + j*Nh] = Q3_[i, j + a*Nh + b*Nh*Np] #Q2[a + b*Np, i + j*Nh] = Q2_[a + i*Np, b + j*Np] Np = 3 Nh = 2 h = transformer(Np,Nh) pphh = random.randint(0,10,(Np**2,Nh**2)) vv = random.randint(0,10,(Np*Nh,Nh*Np)) #pphh = zeros((Np**2,Nh**2)) #n = 0 #for a in range(Np**2): # for i in range(Nh**2): # pphh[a,i] = n # n+= 1 Z = zeros((Np,Np*Nh**2), dtype = int) for i in range(Nh): for j in range(Nh): for a in range(Np): for b in range(Np): Z[a,b+ j*Np + i *Nh*Np] = pphh[a + b*Np, j + i*Nh] def PermuteIndexes(array, perm): return array[ix_(*(perm[:s] for s in array.shape))] print pphh print "===" print Z print "===" print pphh[h.Pa_bjiA, h.Pa_bjiB] #print "PP:", pphh[PaibjA, PaibjB] a,b,i,j = 0,2,1,1 L3_t = 0.0 for k in range(Nh): for c in range(Np): L3_t += vv[k + b*Nh, c + j*Np]*pphh[a + c*Np, i + k*Nh] print "L3_linear:", L3_t #print pphh[pphh[0]] ktrA = zeros((Nh*Np, Nh*Np), dtype = int) ktrB = zeros((Nh*Np, Nh*Np), dtype = int) ktr = zeros((Nh*Np, Nh*Np)) ptrA = zeros((Nh*Np, Nh*Np), dtype = int) 
ptrB = zeros((Nh*Np, Nh*Np), dtype = int) ptr = zeros((Nh*Np, Nh*Np)) L3A = zeros((Np**2, Nh**2), dtype = int) L3B = zeros((Np**2, Nh**2), dtype = int) for i_ in range(Nh): for j_ in range(Nh): for a_ in range(Np): for b_ in range(Np): ktrA[j_ + b_*Nh, a_ + i_*Np] = j_ + a_*Nh #L3 transformation indices ktrB[j_ + b_*Nh, a_ + i_*Np] = b_ + i_*Np ktr[j_ + b_*Nh, a_ + i_*Np] = vv[j_ + a_*Nh, b_ + i_*Np] ptrA[a_ + i_*Np, j_ + b_*Nh] = a_ + b_*Np ptrB[a_ + i_*Np, j_ + b_*Nh] = i_ + j_*Nh ptr[a_ + i_*Np, j_ + b_*Nh] = pphh[a_ + b_*Np, i_+j_*Nh] L3A[a_ + b_*Np, i_ + j_*Nh] = a_ + i_*Np L3B[a_ + b_*Np, i_ + j_*Nh] = b_ + j_*Np print "===" print ktr KT = vv[ktrA, ktrB] print "===" print ptr PT = pphh[ptrA, ptrB] L3_ = dot(PT,KT) L3 = zeros((Np**2, Nh**2)) for i_ in range(Nh): for j_ in range(Nh): for a_ in range(Np): for b_ in range(Np): L3[a_ + b_*Np, i_ + j_*Nh] = L3_[a_ + i_*Np, b_ + j_*Np] print "L3_linear:", L3_t print L3 print L3_[L3A, L3B][a + b*Np, i + j*Nh] #print pphh[range(0,3)] #print h.abij2aibj_(pphh) #print h.abij2aibj(pphh) #print h.abij2aibj(pphh) """ TL3[a + i*Np, b + j*Np] = T[a + b*Np, i + j*Nh] L3[a + b*Np, i + j*Nh] = L3_[a + i*Np, b + j*Np] TQ21[a + i*Np, b + j*Np] = T[a + b*Np, i + j*Nh] TQ22[b + j*Np, a + i*Np] = T[a + b*Np, i + j*Nh] TQ31[i,j+ a*Nh + b *Np*Nh] = T[a + b*Np, i + j*Nh] TQ32[i,j+ a*Nh + b *Np*Nh] = T[a + b*Np, i + j*Nh] TQ41[a,b+ j*Np + i *Nh*Np] = T[a + b*Np, j + i*Nh] TQ42[b, a + i*Np + j *Nh*Np] = T[b + a*Np, i + j*Nh] Q4[a + b*Np, i + j*Nh] = Q4_[a, b + i*Np + j*Np*Nh] """
darkwing/kuma
refs/heads/master
kuma/wiki/views/edit.py
4
# -*- coding: utf-8 -*- import textwrap from urllib import urlencode import newrelic.agent from django.conf import settings from django.core.exceptions import PermissionDenied from django.http import HttpResponse from django.shortcuts import get_object_or_404, redirect, render from django.utils.translation import ugettext from django.views.decorators.clickjacking import xframe_options_sameorigin from django.views.decorators.http import require_http_methods from jingo.helpers import urlparams from ratelimit.decorators import ratelimit import kuma.wiki.content from kuma.attachments.forms import AttachmentRevisionForm from kuma.core.decorators import block_user_agents, login_required, never_cache from kuma.core.urlresolvers import reverse from kuma.core.utils import limit_banned_ip_to_0 from ..decorators import (check_readonly, prevent_indexing, process_document_path) from ..forms import DocumentForm, RevisionForm from ..models import Document, Revision from .translate import translate from .utils import document_form_initial, split_slug @xframe_options_sameorigin def _edit_document_collision(request, orig_rev, curr_rev, is_iframe_target, is_raw, rev_form, doc_form, section_id, rev, doc): """ Handle when a mid-air collision is detected upon submission """ # Process the content as if it were about to be saved, so that the # html_diff is close as possible. content = (kuma.wiki.content.parse(request.POST['content']) .injectSectionIDs() .serialize()) # Process the original content for a diff, extracting a section if we're # editing one. if doc.is_template: curr_content = curr_rev.content else: parsed_content = kuma.wiki.content.parse(curr_rev.content) parsed_content.injectSectionIDs() if section_id: parsed_content.extractSection(section_id) curr_content = parsed_content.serialize() if is_raw: # When dealing with the raw content API, we need to signal the conflict # differently so the client-side can escape out to a conflict # resolution UI. 
response = HttpResponse('CONFLICT') response.status_code = 409 return response # Make this response iframe-friendly so we can hack around the # save-and-edit iframe button context = { 'collision': True, 'revision_form': rev_form, 'document_form': doc_form, 'content': content, 'current_content': curr_content, 'section_id': section_id, 'original_revision': orig_rev, 'current_revision': curr_rev, 'revision': rev, 'document': doc, } return render(request, 'wiki/edit.html', context) @block_user_agents @require_http_methods(['GET', 'POST']) @login_required # TODO: Stop repeating this knowledge here and in Document.allows_editing_by. @ratelimit(key='user', rate=limit_banned_ip_to_0, block=True) @process_document_path @check_readonly @prevent_indexing @never_cache @newrelic.agent.function_trace() def edit(request, document_slug, document_locale, revision_id=None): """ Create a new revision of a wiki document, or edit document metadata. """ doc = get_object_or_404(Document, locale=document_locale, slug=document_slug) # If this document has a parent, then the edit is handled by the # translate view. Pass it on. if doc.parent and doc.parent.id != doc.id: return translate(request, doc.parent.slug, doc.locale, revision_id, bypass_process_document_path=True) if revision_id: rev = get_object_or_404(Revision, pk=revision_id, document=doc) else: rev = doc.current_revision or doc.revisions.order_by('-created', '-id')[0] # Keep hold of the full post slug slug_dict = split_slug(document_slug) # Update the slug, removing the parent path, and # *only* using the last piece. # This is only for the edit form. 
rev.slug = slug_dict['specific'] section_id = request.GET.get('section', None) if section_id and not request.is_ajax(): return HttpResponse(ugettext("Sections may only be edited inline.")) disclose_description = bool(request.GET.get('opendescription')) doc_form = rev_form = None if doc.allows_revision_by(request.user): rev_form = RevisionForm(request=request, instance=rev, initial={'based_on': rev.id, 'current_rev': rev.id, 'comment': ''}, section_id=section_id) if doc.allows_editing_by(request.user): doc_form = DocumentForm(initial=document_form_initial(doc)) # Need to make check *here* to see if this could have a translation parent show_translation_parent_block = ( (document_locale != settings.WIKI_DEFAULT_LANGUAGE) and (not doc.parent_id)) if request.method == 'GET': if not (rev_form or doc_form): # You can't do anything on this page, so get lost. raise PermissionDenied else: # POST is_iframe_target = request.GET.get('iframe', False) is_raw = request.GET.get('raw', False) need_edit_links = request.GET.get('edit_links', False) parent_id = request.POST.get('parent_id', '') # Attempt to set a parent if show_translation_parent_block and parent_id: try: parent_doc = get_object_or_404(Document, id=parent_id) doc.parent = parent_doc except Document.DoesNotExist: pass # Comparing against localized names for the Save button bothers me, so # I embedded a hidden input: which_form = request.POST.get('form') if which_form == 'doc': if doc.allows_editing_by(request.user): post_data = request.POST.copy() post_data.update({'locale': document_locale}) doc_form = DocumentForm(post_data, instance=doc) if doc_form.is_valid(): # if must be here for section edits if 'slug' in post_data: post_data['slug'] = u'/'.join([slug_dict['parent'], post_data['slug']]) # Get the possibly new slug for the imminent redirection: doc = doc_form.save(parent=None) if is_iframe_target: # TODO: Does this really need to be a template? Just # shoehorning data into a single HTML element. 
response = HttpResponse(textwrap.dedent(""" <span id="iframe-response" data-status="OK" data-current-revision="%s">OK</span> """ % doc.current_revision.id)) response['X-Frame-Options'] = 'SAMEORIGIN' return response return redirect(urlparams(doc.get_edit_url(), opendescription=1)) disclose_description = True else: raise PermissionDenied elif which_form == 'rev': if not doc.allows_revision_by(request.user): raise PermissionDenied else: post_data = request.POST.copy() rev_form = RevisionForm(request=request, data=post_data, is_iframe_target=is_iframe_target, section_id=section_id) rev_form.instance.document = doc # for rev_form.clean() # Come up with the original revision to which these changes # would be applied. orig_rev_id = request.POST.get('current_rev', False) if orig_rev_id is False: orig_rev = None else: orig_rev = Revision.objects.get(pk=orig_rev_id) # Get the document's actual current revision. curr_rev = doc.current_revision if not rev_form.is_valid(): # Was there a mid-air collision? if 'current_rev' in rev_form._errors: # Jump out to a function to escape indentation hell return _edit_document_collision( request, orig_rev, curr_rev, is_iframe_target, is_raw, rev_form, doc_form, section_id, rev, doc) if rev_form.is_valid(): rev_form.save(doc) if is_iframe_target: # TODO: Does this really need to be a template? Just # shoehorning data into a single HTML element. response = HttpResponse(""" <span id="iframe-response" data-status="OK" data-current-revision="%s">OK</span> """ % doc.current_revision.id) response['X-Frame-Options'] = 'SAMEORIGIN' return response if (is_raw and orig_rev is not None and curr_rev.id != orig_rev.id): # If this is the raw view, and there was an original # revision, but the original revision differed from the # current revision at start of editing, we should tell # the client to refresh the page. 
response = HttpResponse('RESET') response['X-Frame-Options'] = 'SAMEORIGIN' response.status_code = 205 return response if rev_form.instance.is_approved: view = 'wiki.document' else: view = 'wiki.document_revisions' # Construct the redirect URL, adding any needed parameters url = reverse(view, args=[doc.slug], locale=doc.locale) params = {} if is_raw: params['raw'] = 'true' if need_edit_links: # Only need to carry over ?edit_links with ?raw, # because they're on by default in the normal UI params['edit_links'] = 'true' if section_id: # If a section was edited, and we're using the raw # content API, constrain to that section. params['section'] = section_id if params: url = '%s?%s' % (url, urlencode(params)) if not is_raw and section_id: # If a section was edited, jump to the section anchor # if we're not getting raw content. url = '%s#%s' % (url, section_id) return redirect(url) parent_path = parent_slug = '' if slug_dict['parent']: parent_slug = slug_dict['parent'] if doc.parent_topic_id: parent_doc = Document.objects.get(pk=doc.parent_topic_id) parent_path = parent_doc.get_absolute_url() parent_slug = parent_doc.slug context = { 'revision_form': rev_form, 'document_form': doc_form, 'section_id': section_id, 'disclose_description': disclose_description, 'parent_slug': parent_slug, 'parent_path': parent_path, 'revision': rev, 'document': doc, 'attachment_form': AttachmentRevisionForm(), } return render(request, 'wiki/edit.html', context)
leopittelli/Django-on-App-Engine-Example
refs/heads/master
django/contrib/sitemaps/tests/test_http.py
109
from __future__ import unicode_literals import os from datetime import date from django.conf import settings from django.contrib.sitemaps import Sitemap, GenericSitemap from django.contrib.sites.models import Site from django.core.exceptions import ImproperlyConfigured from django.test.utils import override_settings from django.utils.unittest import skipUnless from django.utils.formats import localize from django.utils._os import upath from django.utils.translation import activate, deactivate from .base import TestModel, SitemapTestsBase class HTTPSitemapTests(SitemapTestsBase): def test_simple_sitemap_index(self): "A simple sitemap index can be rendered" response = self.client.get('/simple/index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % self.base_url self.assertXMLEqual(response.content.decode('utf-8'), expected_content) @override_settings( TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),) ) def test_simple_sitemap_custom_index(self): "A simple sitemap index can be rendered with a custom template" response = self.client.get('/simple/custom-index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <!-- This is a customised template --> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % self.base_url self.assertXMLEqual(response.content.decode('utf-8'), expected_content) def test_simple_sitemap_section(self): "A simple sitemap section can be rendered" response = self.client.get('/simple/sitemap-simple.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % 
(self.base_url, date.today()) self.assertXMLEqual(response.content.decode('utf-8'), expected_content) def test_simple_sitemap(self): "A simple sitemap can be rendered" response = self.client.get('/simple/sitemap.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % (self.base_url, date.today()) self.assertXMLEqual(response.content.decode('utf-8'), expected_content) @override_settings( TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),) ) def test_simple_custom_sitemap(self): "A simple sitemap can be rendered with a custom template" response = self.client.get('/simple/custom-sitemap.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <!-- This is a customised template --> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % (self.base_url, date.today()) self.assertXMLEqual(response.content.decode('utf-8'), expected_content) @skipUnless(settings.USE_I18N, "Internationalization is not enabled") @override_settings(USE_L10N=True) def test_localized_priority(self): "The priority value should not be localized (Refs #14164)" activate('fr') self.assertEqual('0,3', localize(0.3)) # Retrieve the sitemap. 
Check that priorities # haven't been rendered in localized format response = self.client.get('/simple/sitemap.xml') self.assertContains(response, '<priority>0.5</priority>') self.assertContains(response, '<lastmod>%s</lastmod>' % date.today()) deactivate() def test_requestsite_sitemap(self): # Make sure hitting the flatpages sitemap without the sites framework # installed doesn't raise an exception Site._meta.installed = False response = self.client.get('/simple/sitemap.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url> </urlset> """ % date.today() self.assertXMLEqual(response.content.decode('utf-8'), expected_content) @skipUnless("django.contrib.sites" in settings.INSTALLED_APPS, "django.contrib.sites app not installed.") def test_sitemap_get_urls_no_site_1(self): """ Check we get ImproperlyConfigured if we don't pass a site object to Sitemap.get_urls and no Site objects exist """ Site.objects.all().delete() self.assertRaises(ImproperlyConfigured, Sitemap().get_urls) def test_sitemap_get_urls_no_site_2(self): """ Check we get ImproperlyConfigured when we don't pass a site object to Sitemap.get_urls if Site objects exists, but the sites framework is not actually installed. """ Site._meta.installed = False self.assertRaises(ImproperlyConfigured, Sitemap().get_urls) def test_sitemap_item(self): """ Check to make sure that the raw item is included with each Sitemap.get_url() url result. """ test_sitemap = GenericSitemap({'queryset': TestModel.objects.all()}) def is_testmodel(url): return isinstance(url['item'], TestModel) item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls())) self.assertTrue(item_in_url_info) def test_cached_sitemap_index(self): """ Check that a cached sitemap index can be rendered (#2713). 
""" response = self.client.get('/cached/index.xml') expected_content = """<?xml version="1.0" encoding="UTF-8"?> <sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"> <sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap> </sitemapindex> """ % self.base_url self.assertXMLEqual(response.content.decode('utf-8'), expected_content) def test_x_robots_sitemap(self): response = self.client.get('/simple/index.xml') self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive') response = self.client.get('/simple/sitemap.xml') self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')
rjeschmi/easybuild-easyblocks
refs/heads/master
easybuild/easyblocks/s/soapdenovo.py
3
## # This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild # # Copyright:: Copyright 2012-2013 University of Luxembourg/Luxembourg Centre for Systems Biomedicine # Authors:: Cedric Laczny <cedric.laczny@uni.lu>, Fotis Georgatos <fotis.georgatos@uni.lu>, Kenneth Hoste # License:: MIT/GPL # $Id$ # # This work implements a part of the HPCBIOS project and is a component of the policy: # http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html ## """ EasyBuild support for building SOAPdenovo, implemented as an easyblock @author: Cedric Laczny (Uni.Lu) @author: Fotis Georgatos (Uni.Lu) @author: Kenneth Hoste (Ghent University) """ import os import shutil from easybuild.easyblocks.generic.configuremake import ConfigureMake class EB_SOAPdenovo(ConfigureMake): """ Support for building SOAPdenovo. """ def __init__(self, *args, **kwargs): """Define lists of files to install.""" super(EB_SOAPdenovo, self).__init__(*args, **kwargs) self.bin_suffixes = ["31mer", "63mer", "127mer"] def configure_step(self): """ Skip the configure as not part of this build process """ pass def install_step(self): """ Install by copying files to install dir """ srcdir = self.cfg['start_dir'] destdir = os.path.join(self.installdir, 'bin') srcfile = None try: os.makedirs(destdir) for suff in self.bin_suffixes: srcfile = os.path.join(srcdir, "bin", "SOAPdenovo-%s" % suff) shutil.copy2(srcfile, destdir) except OSError, err: self.log.error("Copying %s to installation dir %s failed: %s" % (srcfile, destdir, err)) def sanity_check_step(self): """Custom sanity check for SOAPdenovo.""" custom_paths = { 'files': ['bin/SOAPdenovo-%s' % x for x in self.bin_suffixes], 'dirs': [] } super(EB_SOAPdenovo, self).sanity_check_step(custom_paths=custom_paths)
BRAINSia/ITK
refs/heads/master
Modules/Segmentation/LevelSets/wrapping/test/ThresholdSegmentationLevelSetImageFilterTest.py
7
# ==========================================================================
#
#   Copyright NumFOCUS
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#          http://www.apache.org/licenses/LICENSE-2.0.txt
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# ==========================================================================*/
#
# Test driver for itk.ThresholdSegmentationLevelSetImageFilter: segments a
# region around a seed point whose intensities fall within
# [LowerThreshold, UpperThreshold], using a fast-marching front as the
# initial level set.
#
#     INPUTS:  {BrainProtonDensitySlice.png}
#     OUTPUTS: {ThresholdSegmentationLevelSetImageFilterWhiteMatter.png}
#     60 116 5 150 180
#     INPUTS:  {BrainProtonDensitySlice.png}
#     OUTPUTS: {ThresholdSegmentationLevelSetImageFilterVentricle.png}
#     81 112 5 210 250
#     INPUTS:  {BrainProtonDensitySlice.png}
#     OUTPUTS: {ThresholdSegmentationLevelSetImageFilterGrayMatter.png}
#     107 69 5 180 210

import itk
from sys import argv, stderr, exit
import os

# report pipeline progress on stderr
itk.auto_progress(2)
# itk.auto_progress(1)

# Usage check: 7 positional arguments required, CurvatureScaling optional.
if len(argv) < 8:
    print(
        (
            "Missing Parameters \n Usage: "
            "ThresholdSegmentationLevelSetImageFilter.py inputImage outputImage "
            "seedX seedY InitialDistance LowerThreshold UpperThreshold "
            "[CurvatureScaling == 1.0]"
        ),
        file=stderr,
    )
    exit(1)

# Internal pipeline computes in float; output is written as unsigned char.
InternalPixelType = itk.F
Dimension = 2
InternalImageType = itk.Image[InternalPixelType, Dimension]

OutputPixelType = itk.UC
OutputImageType = itk.Image[OutputPixelType, Dimension]

# Binarize the final level set: negative values (inside the zero level set)
# map to 255, others to 0.
thresholder = itk.BinaryThresholdImageFilter[InternalImageType, OutputImageType].New()
thresholder.SetLowerThreshold(-1000.0)
thresholder.SetUpperThreshold(0.0)
thresholder.SetOutsideValue(0)
thresholder.SetInsideValue(255)

ReaderType = itk.ImageFileReader[InternalImageType]
WriterType = itk.ImageFileWriter[OutputImageType]

reader = ReaderType.New()
writer = WriterType.New()

reader.SetFileName(argv[1])
writer.SetFileName(argv[2])

# Fast marching from the seed produces the initial level-set (distance) image.
FastMarchingFilterType = itk.FastMarchingImageFilter[
    InternalImageType, InternalImageType
]
fastMarching = FastMarchingFilterType.New()

ThresholdSegLvlSetImgFilterType = itk.ThresholdSegmentationLevelSetImageFilter[
    InternalImageType, InternalImageType, InternalPixelType
]
thresholdSegmentation = ThresholdSegLvlSetImgFilterType.New()
thresholdSegmentation.SetPropagationScaling(1.0)
# Optional 8th argument tunes curvature (smoothness) weighting; default 1.0.
if len(argv) > 8:
    thresholdSegmentation.SetCurvatureScaling(float(argv[8]))
else:
    thresholdSegmentation.SetCurvatureScaling(1.0)
# Convergence criteria for the level-set solver.
thresholdSegmentation.SetMaximumRMSError(0.02)
thresholdSegmentation.SetNumberOfIterations(1200)
# Intensity window that defines the region to segment.
thresholdSegmentation.SetUpperThreshold(float(argv[7]))
thresholdSegmentation.SetLowerThreshold(float(argv[6]))
thresholdSegmentation.SetIsoSurfaceValue(0.0)

# Pipeline wiring: fastMarching -> thresholdSegmentation -> thresholder -> writer,
# with the raw input image as the feature image driving the segmentation.
thresholdSegmentation.SetInput(fastMarching.GetOutput())
thresholdSegmentation.SetFeatureImage(reader.GetOutput())
thresholder.SetInput(thresholdSegmentation.GetOutput())
writer.SetInput(thresholder.GetOutput())

# Seed the fast-marching front with a single node at (seedX, seedY) whose
# negative value places the initial contour InitialDistance pixels away.
NodeType = itk.LevelSetNode[InternalPixelType, Dimension]
NodeContainer = itk.VectorContainer[itk.UI, NodeType]

seeds = NodeContainer.New()
seedPosition = [int(argv[3]), int(argv[4])]
initialDistance = float(argv[5])
node = NodeType()
seedValue = -initialDistance
node.SetValue(seedValue)
node.SetIndex(seedPosition)
seeds.Initialize()
seeds.InsertElement(0, node)

fastMarching.SetTrialPoints(seeds)
fastMarching.SetSpeedConstant(1.0)

# Fast marching needs the output size before the pipeline runs, so update the
# reader first.
reader.Update()
fastMarching.SetOutputSize(reader.GetOutput().GetBufferedRegion().GetSize())

writer.Update()

itk.echo(thresholdSegmentation)

# Also dump the intermediate fast-marching and speed images for inspection.
InternalWriterType = itk.ImageFileWriter[InternalImageType]
outputDirectory = os.path.dirname(argv[2])

mapWriter = InternalWriterType.New()
mapWriter.SetInput(fastMarching.GetOutput())
mapWriter.SetFileName(os.path.join(outputDirectory, "fastMarchingImage.mha"))
mapWriter.Update()

speedWriter = InternalWriterType.New()
speedWriter.SetInput(thresholdSegmentation.GetSpeedImage())
speedWriter.SetFileName(os.path.join(outputDirectory, "speedTermImage.mha"))
speedWriter.Update()
pitunti/alfaPitunti
refs/heads/master
plugin.video.alfa/channels/wopelis.py
1
# -*- coding: utf-8 -*-
# Kodi/alfa channel scraper for wopelis.com: builds menus, lists movies and
# TV shows, resolves per-server video links and handles the site's
# JavaScript-generated session cookie.

import re

from core import channeltools
from core import httptools
from core import scrapertools
from core import servertools
from core import tmdb
from core.item import Item
from platformcode import config, logger

HOST = 'http://www.wopelis.com'

__channel__ = 'wopelis'
parameters = channeltools.get_channel_parameters(__channel__)
fanart_host = parameters['fanart']
thumbnail_host = parameters['thumbnail']
# Highlight / normal / header colors used across the channel menus.
color1, color2, color3 = ['0xFF58D3F7', '0xFF2E64FE', '0xFF0404B4']


def mainlist(item):
    """Build the channel's root menu (movie and TV-show sections)."""
    logger.info()
    itemlist = []
    item.url = HOST
    item.text_color = color2
    item.fanart = fanart_host
    item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/Directors%20Chair.png"
    url = HOST + "/galep.php?solo=cenlaces&empen=0"
    itemlist.append(item.clone(title="Películas:", folder=False, text_color=color3, text_bold=True))
    itemlist.append(item.clone(title=" Recientes", action="listado", url=url))
    itemlist.append(item.clone(title=" Mas populares de la semana", action="listado", url=url + "&ord=popu"))
    itemlist.append(item.clone(title=" Por géneros", action="generos", url=HOST + "/index.php"))
    itemlist.append(item.clone(title=" Buscar película", action="search", url=url))
    itemlist.append(item.clone(title="", folder=False, thumbnail=thumbnail_host))
    item.thumbnail = "https://github.com/master-1970/resources/raw/master/images/genres/0/TV%20Series.png"
    url = HOST + "/gales.php?empen=0"
    itemlist.append(item.clone(title="Series:", folder=False, text_color=color3, text_bold=True))
    itemlist.append(item.clone(title=" Nuevos episodios", action="listado", url=url + "&ord=reci"))
    itemlist.append(item.clone(title=" Mas populares de la semana", action="listado", url=url + "&ord=popu"))
    itemlist.append(item.clone(title=" Por géneros", action="generos", url=HOST + "/series.php"))
    itemlist.append(item.clone(title=" Buscar serie", action="search", url=url + "&ord=popu"))
    return itemlist


def newest(categoria):
    """Entry point for the global 'novedades' feature: latest movies/episodes."""
    logger.info()
    itemlist = []
    item = Item()
    try:
        if categoria == 'peliculas':
            item.url = HOST + "/galep.php?solo=cenlaces&empen=0"
        elif categoria == 'series':
            item.url = HOST + "/gales.php?empen=0&ord=reci"
        else:
            return []
        itemlist = listado(item)
        # Drop the trailing pagination entry, if present.
        # NOTE(review): itemlist[-1] raises IndexError on an empty list; the
        # broad except below swallows it, so an empty listing returns [].
        if itemlist[-1].title == ">> Página siguiente":
            itemlist.pop()
    # Catch the exception so the global 'news' feature is not interrupted if
    # one channel fails.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("{0}".format(line))
        return []
    return itemlist


def search(item, texto):
    """Entry point for the global search: query the site for `texto`."""
    logger.info("search:" + texto)
    try:
        if texto:
            item.url = "%s&busqueda=%s" % (item.url, texto.replace(" ", "+"))
            return listado(item)
        else:
            return []
    # Catch the exception so the global search is not interrupted if one
    # channel fails.
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def generos(item):
    """List the site's genres (for movies or series, depending on item.url)."""
    logger.info()
    itemlist = []
    # Map accented/odd genre names to the slug used by the thumbnail repo.
    dict_gender = {"acción": "accion", "animación": "animacion", "ciencia ficción": "ciencia%20ficcion",
                   "fantasía": "fantasia", "música": "musica", "película de la televisión": "pelicula%20de%20tv"}
    data = downloadpage(item.url)
    data = scrapertools.find_single_match(data, '<select name="gener">(.*?)</select>')
    for genero in scrapertools.find_multiple_matches(data, '<option value="([^"]+)'):
        if genero != 'Todos':
            if 'series' in item.url:
                url = HOST + "/gales.php?empen=0&gener=%s" % genero
            else:
                url = HOST + "/galep.php?solo=cenlaces&empen=0&gener=%s" % genero
            thumbnail = "https://raw.githubusercontent.com/master-1970/resources/master/images/genres/4/azul/%s.png"
            thumbnail = thumbnail % dict_gender.get(genero.lower(), genero.lower())
            itemlist.append(Item(channel=item.channel, action="listado", title=genero, url=url, text_color=color1,
                                 contentType='movie', folder=True, thumbnail=thumbnail))
            # ,viewmode="movie_with_plot"))
    return sorted(itemlist, key=lambda i: i.title.lower())


def listado(item):
    """Scrape a paginated listing page into movie/tvshow/episode items."""
    logger.info(item)
    itemlist = []
    data = downloadpage(item.url)
    patron = '<a class="extended" href=".([^"]+).*?'
    patron += '<img class="centeredPicFalse"([^>]+).*?'
    patron += '<span class="year">(\d{4})</span>.*?'
    patron += '<span class="title">(.*?)</span>'
    for url, pic, year, title in scrapertools.find_multiple_matches(data, patron):
        thumbnail = scrapertools.find_single_match(pic, 'src="([^"]+)')
        if not thumbnail:
            thumbnail = HOST + "/images/cover-notfound.png"
        new_item = Item(channel=__channel__, thumbnail=thumbnail, text_color=color2,
                        infoLabels={"year": year})
        if "galep.php" in item.url:
            # movie
            new_item.contentTitle = title
            new_item.action = "findvideos"
            new_item.url = HOST + url.replace('peli.php?id=', 'venlaces.php?npl=')
        elif "gales.php" in item.url:
            # tvshow
            title = title.replace(' - 0x0', '')
            new_item.contentSerieName = title
            new_item.action = "temporadas"
            new_item.url = HOST + url
            if "ord=reci" in item.url:
                # episode: title carries a "SxE - series name" prefix
                season_episode = scrapertools.get_season_and_episode(title)
                if season_episode:
                    new_item.contentSeason, new_item.contentEpisodeNumber = season_episode.split('x')
                    new_item.action = "get_episodio"
                    new_item.contentSerieName = title.split('-', 1)[1].strip()
            elif "gener=" in item.url and scrapertools.get_season_and_episode(title):
                # Series filtered by genre return both episodes and full series
                title = title.split('-', 1)[1].strip()
                new_item.contentSerieName = title
        else:
            return []
        new_item.title = "%s (%s)" % (title, year)
        itemlist.append(new_item)
    if itemlist:
        # Fetch basic metadata via multiple threads
        tmdb.set_infoLabels(itemlist)
        # Add pagination if needed (the site serves 35 results per page)
        if len(itemlist) == 35:
            empen = scrapertools.find_single_match(item.url, 'empen=(\d+)')
            url_next_page = item.url.replace('empen=%s' % empen, 'empen=%s' % (int(empen) + 35))
            itemlist.append(Item(channel=item.channel, action="listado", title=">> Página siguiente",
                                 thumbnail=thumbnail_host, url=url_next_page, folder=True,
                                 text_color=color3, text_bold=True))
    return itemlist


def temporadas(item):
    """List the seasons of a TV show."""
    logger.info(item)
    itemlist = []
    data = downloadpage(item.url)
    patron = '<div class="checkSeason" data-num="([^"]+)[^>]+>([^<]+)'
    for num_season, title in scrapertools.find_multiple_matches(data, patron):
        itemlist.append(item.clone(contentSeason=num_season,
                                   title="%s - %s" % (item.contentSerieName, title),
                                   action="episodios"))
    if itemlist:
        # Fetch season metadata via multiple threads
        tmdb.set_infoLabels(itemlist)
        if config.get_videolibrary_support():
            itemlist.append(item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library",
                                       extra="episodios", text_color=color1, thumbnail=thumbnail_host,
                                       fanart=fanart_host))
    return itemlist


def episodios(item):
    """List episodes; optionally filtered to item.contentSeason/EpisodeNumber."""
    logger.info()
    itemlist = []
    data = downloadpage(item.url)
    patron = '<div class="checkSeason" data-num="([^"]+)(.*?)</div></div></div>'
    for num_season, data in scrapertools.find_multiple_matches(data, patron):
        if item.contentSeason and item.contentSeason != int(num_season):
            # Looking for the episodes of a specific season and this
            # (num_season) is not it...
            continue
        patron = '<div class="info"><a href="..([^"]+).*?class="number">([^<]+)'
        for url, num_episode in scrapertools.find_multiple_matches(data, patron):
            if item.contentEpisodeNumber and item.contentEpisodeNumber != int(num_episode):
                # Looking for a specific episode and this (num_episode) is
                # not it...
                continue
            title = "%sx%s - %s" % (num_season, num_episode.strip().zfill(2), item.contentSerieName)
            itemlist.append(item.clone(title=title, url=HOST + url, action="findvideos",
                                       contentSeason=num_season, contentEpisodeNumber=num_episode))
    if itemlist and hasattr(item, 'contentSeason'):
        # Fetch this season's episode metadata via multiple threads
        tmdb.set_infoLabels(itemlist)
        for i in itemlist:
            if i.infoLabels['title']:
                # If the episode has its own name, append it to the item title
                i.title = "%sx%s %s" % (
                    i.infoLabels['season'], str(i.infoLabels['episode']).zfill(2), i.infoLabels['title'])
    return itemlist


def get_episodio(item):
    """Resolve a single episode straight to its video links."""
    logger.info()
    itemlist = episodios(item)
    if itemlist:
        itemlist = findvideos(itemlist[0])
    return itemlist


def findvideos(item):
    """Scrape the online/download server links for a movie or episode."""
    logger.info()
    itemlist = []
    # Site image-slug -> display language; 'ntfof' is the "no flag" image.
    dic_langs = {'esp': 'Español', 'english': 'Ingles', 'japo': 'Japones', 'argentina': 'Latino', 'ntfof': ''}
    # Site image-slug -> canonical server name.
    dic_servers = {'ntfof': 'Servidor Desconocido', 'stramango': 'streamango', 'flasht': 'flashx'}
    data1 = downloadpage(item.url)
    patron = '(?s)onclick="redir\(([^\)]+).*?'
    patron += '<img style="float:left" src="./[^/]+/([^\.]+).+?'
    patron += '<span[^>]+>([^<]+).*?'
    patron += '<img(.*?)on'
    if "Descarga:</h1>" in data1:
        list_showlinks = [('Online:', 'Online:</h1>(.*?)Descarga:</h1>'),
                          ('Download:', 'Descarga:</h1>(.*?)</section>')]
    else:
        list_showlinks = [('Online:', 'Online:</h1>(.*?)</section>')]
    for t in list_showlinks:
        data = scrapertools.find_single_match(data1, t[1])
        if data:
            # Non-clickable section header ("Online:" / "Download:")
            itemlist.append(Item(title=t[0], text_color=color3, text_bold=True, folder=False,
                                 thumbnail=thumbnail_host))
            for redir, server, quality, langs in scrapertools.find_multiple_matches(data, patron):
                # , server, quality, langs
                # redir is the redir(...) JS argument list: 'url','id','type'
                redir = redir.split(",")
                url = redir[0][1:-1]
                id = redir[1][1:-1]
                # type = redir[2][1:-1]
                # url = url.split("','")[0]
                # [2] = 0 movies, [2] = 1 tvshows
                # First flag image is the audio language, second the subtitles.
                langs = scrapertools.find_multiple_matches(langs, 'src="./images/([^\.]+)')
                idioma = dic_langs.get(langs[0], langs[0])
                subtitulos = dic_langs.get(langs[1], langs[1])
                if subtitulos:
                    idioma = "%s (Sub: %s)" % (idioma, subtitulos)
                if server in dic_servers:
                    server = dic_servers[server]
                itemlist.append(
                    item.clone(url=url, action="play", language=idioma, contentQuality=quality, server=server,
                               title=" %s: %s [%s]" % (server.capitalize(), idioma, quality)))
    if itemlist and config.get_videolibrary_support() and not "library" in item.extra:
        if item.contentType == 'movie':
            itemlist.append(item.clone(title="Añadir película a la videoteca", action="add_pelicula_to_library",
                                       text_color=color1, contentTitle=item.contentTitle, extra="library",
                                       thumbnail=thumbnail_host))
        else:
            # http://www.wopelis.com/serie.php?id=275641
            # NOTE(review): 'id' here is the value left over from the last
            # loop iteration above; if no link matched it is undefined —
            # verify this branch is only reached after at least one match.
            item.url = "http://www.wopelis.com/serie.php?id=" + id
            item.contentSeason = 0
            item.contentEpisodeNumber = 0
            # logger.error(item)
            itemlist.append(item.clone(title="Añadir esta serie a la videoteca", action="add_serie_to_library",
                                       extra="episodios###library", text_color=color1, thumbnail=thumbnail_host))
    return itemlist


def play(item):
    """Resolve the final playable URL for a selected link."""
    logger.info()
    itemlist = []
    # Look for the video on the declared server...
    devuelve = servertools.findvideosbyserver(item.url, item.server)
    if not devuelve:
        # ...if not found, search across all available servers
        devuelve = servertools.findvideos(item.url, skip=True)
    if devuelve:
        # logger.debug(devuelve)
        itemlist.append(Item(channel=item.channel, title=item.contentTitle, action="play", server=devuelve[0][2],
                             url=devuelve[0][1], thumbnail=item.thumbnail, folder=False))
    return itemlist


def downloadpage(url):
    """Fetch `url` with the site's anti-bot cookie, regenerating it if stale.

    Returns the page body with newlines/tabs/double-spaces/&nbsp; stripped so
    the regex patterns above can match across what were line breaks.
    """
    cookievalue = config.get_setting("cookie", "wopelis")
    if not cookievalue:
        # No cached cookie: fetch once to obtain the JS cookie challenge.
        data = httptools.downloadpage(url).data
        cookievalue = get_cookie(data)
    headers = {'Cookie': '%s' % cookievalue}
    data = httptools.downloadpage(url, headers=headers).data
    if "Hola bienvenido" in data:
        # Cookie rejected/expired ("Hola bienvenido" is the challenge page):
        # regenerate and retry once.
        cookievalue = get_cookie(data)
        headers = {'Cookie': '%s' % cookievalue}
        data = httptools.downloadpage(url, headers=headers).data
    return re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)


def get_cookie(data):
    """Rebuild the cookie the site's inline JavaScript would set.

    Parses the cookie name, the allowed character set and the cok() generator
    out of the challenge page, then mimics it: '(N' tokens append N random
    characters from `posible`, any other token appends its literal tail.
    Persists the result in the channel settings and returns "name=value".
    """
    import random
    cookievalue = ""
    cookiename = scrapertools.find_single_match(data, 'document.cookie\s*=\s*"([^"]+)"')
    cookiename = cookiename.replace("=", "")
    posible = scrapertools.find_single_match(data, 'var possible\s*=\s*"([^"]+)"')
    bloque = scrapertools.find_single_match(data, 'function cok(.*?);')
    lengths = scrapertools.find_multiple_matches(bloque, '([\S]{1}\d+)')
    for numero in lengths:
        if numero.startswith("("):
            for i in range(0, int(numero[1:])):
                cookievalue += posible[int(random.random() * len(posible))]
        else:
            cookievalue += numero[1:]
    cookievalue = "%s=%s" % (cookiename, cookievalue)
    config.set_setting("cookie", cookievalue, "wopelis")
    return cookievalue
nsnam/ns-3-dev-git
refs/heads/master
src/propagation/bindings/modulegen__gcc_LP64.py
4
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers import pybindgen.settings import warnings class ErrorHandler(pybindgen.settings.ErrorHandler): def handle_error(self, wrapper, exception, traceback_): warnings.warn("exception %r in wrapper %s" % (exception, wrapper)) return True pybindgen.settings.error_handler = ErrorHandler() import sys def module_init(): root_module = Module('ns.propagation', cpp_namespace='::ns3') return root_module def register_types(module): root_module = module.get_root() ## propagation-environment.h (module 'propagation'): ns3::EnvironmentType [enumeration] module.add_enum('EnvironmentType', ['UrbanEnvironment', 'SubUrbanEnvironment', 'OpenAreasEnvironment']) ## propagation-environment.h (module 'propagation'): ns3::CitySize [enumeration] module.add_enum('CitySize', ['SmallCity', 'MediumCity', 'LargeCity']) ## log.h (module 'core'): ns3::LogLevel [enumeration] module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'], import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class] module.add_class('AttributeConstructionList', import_from_module='ns.core') ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct] module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList']) typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator', 'ns3::AttributeConstructionList::CIterator') typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator*', 'ns3::AttributeConstructionList::CIterator*') 
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator&', 'ns3::AttributeConstructionList::CIterator&') ## callback.h (module 'core'): ns3::CallbackBase [class] module.add_class('CallbackBase', import_from_module='ns.core') ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct] module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor']) ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct] module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker']) ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct] module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue']) ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct] module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase']) ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl> [struct] module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl']) ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct] module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation']) ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct] module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor']) ## event-id.h (module 'core'): ns3::EventId [class] module.add_class('EventId', import_from_module='ns.core') ## hash.h (module 'core'): ns3::Hasher [class] module.add_class('Hasher', import_from_module='ns.core') ## log.h (module 'core'): ns3::LogComponent [class] module.add_class('LogComponent', 
import_from_module='ns.core') typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >', 'ns3::LogComponent::ComponentList') typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >*', 'ns3::LogComponent::ComponentList*') typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >&', 'ns3::LogComponent::ComponentList&') ## object-base.h (module 'core'): ns3::ObjectBase [class] module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core') ## object.h (module 'core'): ns3::ObjectDeleter [struct] module.add_class('ObjectDeleter', import_from_module='ns.core') ## log.h (module 'core'): ns3::ParameterLogger [class] module.add_class('ParameterLogger', import_from_module='ns.core') ## propagation-cache.h (module 'propagation'): ns3::PropagationCache<ns3::JakesProcess> [class] module.add_class('PropagationCache', template_parameters=['ns3::JakesProcess']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::ObjectBase'], template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter']) ## nstime.h (module 'core'): ns3::Time [class] module.add_class('Time', import_from_module='ns.core') ## nstime.h (module 'core'): ns3::Time::Unit [enumeration] module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST', 'AUTO'], outer_class=root_module['ns3::Time'], import_from_module='ns.core') typehandlers.add_type_alias('void ( * ) ( ns3::Time )', 'ns3::Time::TracedCallback') typehandlers.add_type_alias('void ( * ) ( ns3::Time )*', 'ns3::Time::TracedCallback*') typehandlers.add_type_alias('void ( * ) ( ns3::Time )&', 'ns3::Time::TracedCallback&') ## nstime.h 
(module 'core'): ns3::TimeWithUnit [class] module.add_class('TimeWithUnit', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId [class] module.add_class('TypeId', import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration] module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration] module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core') ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct] module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct] module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId']) typehandlers.add_type_alias('uint32_t', 'ns3::TypeId::hash_t') typehandlers.add_type_alias('uint32_t*', 'ns3::TypeId::hash_t*') typehandlers.add_type_alias('uint32_t&', 'ns3::TypeId::hash_t&') ## vector.h (module 'core'): ns3::Vector2D [class] module.add_class('Vector2D', import_from_module='ns.core') ## vector.h (module 'core'): ns3::Vector3D [class] module.add_class('Vector3D', import_from_module='ns.core') ## empty.h (module 'core'): ns3::empty [class] module.add_class('empty', import_from_module='ns.core') ## int64x64-128.h (module 'core'): ns3::int64x64_t [class] module.add_class('int64x64_t', import_from_module='ns.core') ## int64x64-128.h (module 'core'): ns3::int64x64_t::impl_type [enumeration] module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core') ## object.h (module 'core'): ns3::Object [class] module.add_class('Object', import_from_module='ns.core', 
parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) ## object.h (module 'core'): ns3::Object::AggregateIterator [class] module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object']) ## propagation-delay-model.h (module 'propagation'): ns3::PropagationDelayModel [class] module.add_class('PropagationDelayModel', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel [class] module.add_class('PropagationLossModel', parent=root_module['ns3::Object']) ## propagation-delay-model.h (module 'propagation'): ns3::RandomPropagationDelayModel [class] module.add_class('RandomPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel']) ## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel [class] module.add_class('RandomPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class] module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object']) ## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel [class] module.add_class('RangePropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class] module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], 
template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', 
peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>']) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class] module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>']) ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppPropagationLossModel [class] module.add_class('ThreeGppPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppRmaPropagationLossModel [class] module.add_class('ThreeGppRmaPropagationLossModel', parent=root_module['ns3::ThreeGppPropagationLossModel']) ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppUmaPropagationLossModel [class] module.add_class('ThreeGppUmaPropagationLossModel', parent=root_module['ns3::ThreeGppPropagationLossModel']) ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppUmiStreetCanyonPropagationLossModel [class] 
module.add_class('ThreeGppUmiStreetCanyonPropagationLossModel', parent=root_module['ns3::ThreeGppPropagationLossModel']) ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppV2vUrbanPropagationLossModel [class] module.add_class('ThreeGppV2vUrbanPropagationLossModel', parent=root_module['ns3::ThreeGppPropagationLossModel']) ## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel [class] module.add_class('ThreeLogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class] module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class] module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel [class] module.add_class('TwoRayGroundPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class] module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class] module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class] module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class] module.add_class('ZipfRandomVariable', import_from_module='ns.core', 
parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::AttributeAccessor [class] module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) ## attribute.h (module 'core'): ns3::AttributeChecker [class] module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) ## attribute.h (module 'core'): ns3::AttributeValue [class] module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) ## callback.h (module 'core'): ns3::CallbackChecker [class] module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## callback.h (module 'core'): ns3::CallbackImplBase [class] module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) ## callback.h (module 'core'): ns3::CallbackValue [class] module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## channel-condition-model.h (module 'propagation'): ns3::ChannelCondition [class] module.add_class('ChannelCondition', parent=root_module['ns3::Object']) ## channel-condition-model.h (module 'propagation'): ns3::ChannelCondition::LosConditionValue [enumeration] module.add_enum('LosConditionValue', ['LOS', 'NLOS', 'NLOSv', 'LC_ND'], outer_class=root_module['ns3::ChannelCondition']) ## channel-condition-model.h (module 'propagation'): ns3::ChannelCondition::O2iConditionValue [enumeration] 
module.add_enum('O2iConditionValue', ['O2O', 'O2I', 'I2I', 'O2I_ND'], outer_class=root_module['ns3::ChannelCondition']) ## channel-condition-model.h (module 'propagation'): ns3::ChannelConditionModel [class] module.add_class('ChannelConditionModel', parent=root_module['ns3::Object']) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class] module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-delay-model.h (module 'propagation'): ns3::ConstantSpeedPropagationDelayModel [class] module.add_class('ConstantSpeedPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel']) ## cost231-propagation-loss-model.h (module 'propagation'): ns3::Cost231PropagationLossModel [class] module.add_class('Cost231PropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class] module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class] module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class] module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor']) ## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class] module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## attribute.h (module 'core'): ns3::EmptyAttributeValue [class] module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class] module.add_class('ErlangRandomVariable', 
import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## event-impl.h (module 'core'): ns3::EventImpl [class] module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class] module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel [class] module.add_class('FixedRssLossModel', parent=root_module['ns3::PropagationLossModel']) ## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel [class] module.add_class('FriisPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class] module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411LosPropagationLossModel [class] module.add_class('ItuR1411LosPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411NlosOverRooftopPropagationLossModel [class] module.add_class('ItuR1411NlosOverRooftopPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## jakes-process.h (module 'propagation'): ns3::JakesProcess [class] module.add_class('JakesProcess', parent=root_module['ns3::Object']) ## jakes-propagation-loss-model.h (module 'propagation'): ns3::JakesPropagationLossModel [class] module.add_class('JakesPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): ns3::Kun2600MhzPropagationLossModel [class] 
module.add_class('Kun2600MhzPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel [class] module.add_class('LogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class] module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel [class] module.add_class('MatrixPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## mobility-model.h (module 'mobility'): ns3::MobilityModel [class] module.add_class('MobilityModel', import_from_module='ns.mobility', parent=root_module['ns3::Object']) typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::MobilityModel const > )', 'ns3::MobilityModel::TracedCallback') typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::MobilityModel const > )*', 'ns3::MobilityModel::TracedCallback*') typehandlers.add_type_alias('void ( * ) ( ns3::Ptr< ns3::MobilityModel const > )&', 'ns3::MobilityModel::TracedCallback&') ## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel [class] module.add_class('NakagamiPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## channel-condition-model.h (module 'propagation'): ns3::NeverLosChannelConditionModel [class] module.add_class('NeverLosChannelConditionModel', parent=root_module['ns3::ChannelConditionModel']) ## channel-condition-model.h (module 'propagation'): ns3::NeverLosVehicleChannelConditionModel [class] module.add_class('NeverLosVehicleChannelConditionModel', parent=root_module['ns3::ChannelConditionModel']) ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class] module.add_class('NormalRandomVariable', import_from_module='ns.core', 
parent=root_module['ns3::RandomVariableStream']) ## okumura-hata-propagation-loss-model.h (module 'propagation'): ns3::OkumuraHataPropagationLossModel [class] module.add_class('OkumuraHataPropagationLossModel', parent=root_module['ns3::PropagationLossModel']) ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class] module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream']) ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppChannelConditionModel [class] module.add_class('ThreeGppChannelConditionModel', parent=root_module['ns3::ChannelConditionModel']) ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppIndoorMixedOfficeChannelConditionModel [class] module.add_class('ThreeGppIndoorMixedOfficeChannelConditionModel', parent=root_module['ns3::ThreeGppChannelConditionModel']) ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppIndoorOfficePropagationLossModel [class] module.add_class('ThreeGppIndoorOfficePropagationLossModel', parent=root_module['ns3::ThreeGppPropagationLossModel']) ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppIndoorOpenOfficeChannelConditionModel [class] module.add_class('ThreeGppIndoorOpenOfficeChannelConditionModel', parent=root_module['ns3::ThreeGppChannelConditionModel']) ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppRmaChannelConditionModel [class] module.add_class('ThreeGppRmaChannelConditionModel', parent=root_module['ns3::ThreeGppChannelConditionModel']) ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppUmaChannelConditionModel [class] module.add_class('ThreeGppUmaChannelConditionModel', parent=root_module['ns3::ThreeGppChannelConditionModel']) ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppUmiStreetCanyonChannelConditionModel [class] module.add_class('ThreeGppUmiStreetCanyonChannelConditionModel', 
parent=root_module['ns3::ThreeGppChannelConditionModel']) ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppV2vHighwayPropagationLossModel [class] module.add_class('ThreeGppV2vHighwayPropagationLossModel', parent=root_module['ns3::ThreeGppV2vUrbanPropagationLossModel']) ## nstime.h (module 'core'): ns3::TimeValue [class] module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## type-id.h (module 'core'): ns3::TypeIdChecker [class] module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## type-id.h (module 'core'): ns3::TypeIdValue [class] module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## vector.h (module 'core'): ns3::Vector2DChecker [class] module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## vector.h (module 'core'): ns3::Vector2DValue [class] module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## vector.h (module 'core'): ns3::Vector3DChecker [class] module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker']) ## vector.h (module 'core'): ns3::Vector3DValue [class] module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue']) ## channel-condition-model.h (module 'propagation'): ns3::AlwaysLosChannelConditionModel [class] module.add_class('AlwaysLosChannelConditionModel', parent=root_module['ns3::ChannelConditionModel']) ## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class] module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['ns3::ObjectBase *', 'ns3::empty', 
'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty']) ## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::MobilityModel>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class] module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'ns3::Ptr<const ns3::MobilityModel>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty']) ## probabilistic-v2v-channel-condition-model.h (module 'propagation'): ns3::ProbabilisticV2vHighwayChannelConditionModel [class] module.add_class('ProbabilisticV2vHighwayChannelConditionModel', parent=root_module['ns3::ThreeGppChannelConditionModel']) ## probabilistic-v2v-channel-condition-model.h (module 'propagation'): ns3::ProbabilisticV2vUrbanChannelConditionModel [class] module.add_class('ProbabilisticV2vUrbanChannelConditionModel', parent=root_module['ns3::ThreeGppChannelConditionModel']) module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type='map') typehandlers.add_type_alias('ns3::Vector3D', 'ns3::Vector') typehandlers.add_type_alias('ns3::Vector3D*', 'ns3::Vector*') typehandlers.add_type_alias('ns3::Vector3D&', 'ns3::Vector&') module.add_typedef(root_module['ns3::Vector3D'], 'Vector') typehandlers.add_type_alias('ns3::Vector3DValue', 'ns3::VectorValue') typehandlers.add_type_alias('ns3::Vector3DValue*', 'ns3::VectorValue*') typehandlers.add_type_alias('ns3::Vector3DValue&', 'ns3::VectorValue&') module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue') typehandlers.add_type_alias('ns3::Vector3DChecker', 'ns3::VectorChecker') typehandlers.add_type_alias('ns3::Vector3DChecker*', 'ns3::VectorChecker*') typehandlers.add_type_alias('ns3::Vector3DChecker&', 'ns3::VectorChecker&') 
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker') typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::TimePrinter') typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::TimePrinter*') typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::TimePrinter&') typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::NodePrinter') typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::NodePrinter*') typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::NodePrinter&') ## Register a nested module for the namespace FatalImpl nested_module = module.add_cpp_namespace('FatalImpl') register_types_ns3_FatalImpl(nested_module) ## Register a nested module for the namespace Hash nested_module = module.add_cpp_namespace('Hash') register_types_ns3_Hash(nested_module) ## Register a nested module for the namespace TracedValueCallback nested_module = module.add_cpp_namespace('TracedValueCallback') register_types_ns3_TracedValueCallback(nested_module) def register_types_ns3_FatalImpl(module): root_module = module.get_root() def register_types_ns3_Hash(module): root_module = module.get_root() ## hash-function.h (module 'core'): ns3::Hash::Implementation [class] module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash32Function_ptr') typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )*', 'ns3::Hash::Hash32Function_ptr*') typehandlers.add_type_alias('uint32_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash32Function_ptr&') typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )', 'ns3::Hash::Hash64Function_ptr') typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const 
)*', 'ns3::Hash::Hash64Function_ptr*') typehandlers.add_type_alias('uint64_t ( * ) ( char const *, std::size_t const )&', 'ns3::Hash::Hash64Function_ptr&') ## Register a nested module for the namespace Function nested_module = module.add_cpp_namespace('Function') register_types_ns3_Hash_Function(nested_module) def register_types_ns3_Hash_Function(module): root_module = module.get_root() ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class] module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class] module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class] module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class] module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation']) def register_types_ns3_TracedValueCallback(module): root_module = module.get_root() typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )', 'ns3::TracedValueCallback::Time') typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )*', 'ns3::TracedValueCallback::Time*') typehandlers.add_type_alias('void ( * ) ( ns3::Time, ns3::Time )&', 'ns3::TracedValueCallback::Time&') def register_methods(root_module): register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList']) register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item']) register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase']) register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >']) 
register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >']) register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >']) register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >']) register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >']) register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >']) register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >']) register_Ns3EventId_methods(root_module, root_module['ns3::EventId']) register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher']) register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent']) register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase']) register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter']) register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger']) register_Ns3PropagationCache__Ns3JakesProcess_methods(root_module, root_module['ns3::PropagationCache< ns3::JakesProcess >']) register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >']) register_Ns3Time_methods(root_module, root_module['ns3::Time']) register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit']) register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId']) register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation']) register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation']) 
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D']) register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D']) register_Ns3Empty_methods(root_module, root_module['ns3::empty']) register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t']) register_Ns3Object_methods(root_module, root_module['ns3::Object']) register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator']) register_Ns3PropagationDelayModel_methods(root_module, root_module['ns3::PropagationDelayModel']) register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel']) register_Ns3RandomPropagationDelayModel_methods(root_module, root_module['ns3::RandomPropagationDelayModel']) register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel']) register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream']) register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel']) register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable']) register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >']) register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >']) register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >']) 
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >']) register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >']) register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >']) register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >']) register_Ns3ThreeGppPropagationLossModel_methods(root_module, root_module['ns3::ThreeGppPropagationLossModel']) register_Ns3ThreeGppRmaPropagationLossModel_methods(root_module, root_module['ns3::ThreeGppRmaPropagationLossModel']) register_Ns3ThreeGppUmaPropagationLossModel_methods(root_module, root_module['ns3::ThreeGppUmaPropagationLossModel']) register_Ns3ThreeGppUmiStreetCanyonPropagationLossModel_methods(root_module, root_module['ns3::ThreeGppUmiStreetCanyonPropagationLossModel']) register_Ns3ThreeGppV2vUrbanPropagationLossModel_methods(root_module, root_module['ns3::ThreeGppV2vUrbanPropagationLossModel']) register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel']) register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor']) register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable']) register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, 
root_module['ns3::TwoRayGroundPropagationLossModel']) register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable']) register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable']) register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable']) register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable']) register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor']) register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker']) register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue']) register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker']) register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase']) register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue']) register_Ns3ChannelCondition_methods(root_module, root_module['ns3::ChannelCondition']) register_Ns3ChannelConditionModel_methods(root_module, root_module['ns3::ChannelConditionModel']) register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable']) register_Ns3ConstantSpeedPropagationDelayModel_methods(root_module, root_module['ns3::ConstantSpeedPropagationDelayModel']) register_Ns3Cost231PropagationLossModel_methods(root_module, root_module['ns3::Cost231PropagationLossModel']) register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable']) register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable']) register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor']) register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker']) register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue']) 
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable']) register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl']) register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable']) register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel']) register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel']) register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable']) register_Ns3ItuR1411LosPropagationLossModel_methods(root_module, root_module['ns3::ItuR1411LosPropagationLossModel']) register_Ns3ItuR1411NlosOverRooftopPropagationLossModel_methods(root_module, root_module['ns3::ItuR1411NlosOverRooftopPropagationLossModel']) register_Ns3JakesProcess_methods(root_module, root_module['ns3::JakesProcess']) register_Ns3JakesPropagationLossModel_methods(root_module, root_module['ns3::JakesPropagationLossModel']) register_Ns3Kun2600MhzPropagationLossModel_methods(root_module, root_module['ns3::Kun2600MhzPropagationLossModel']) register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel']) register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable']) register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel']) register_Ns3MobilityModel_methods(root_module, root_module['ns3::MobilityModel']) register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel']) register_Ns3NeverLosChannelConditionModel_methods(root_module, root_module['ns3::NeverLosChannelConditionModel']) register_Ns3NeverLosVehicleChannelConditionModel_methods(root_module, root_module['ns3::NeverLosVehicleChannelConditionModel']) register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable']) 
register_Ns3OkumuraHataPropagationLossModel_methods(root_module, root_module['ns3::OkumuraHataPropagationLossModel']) register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable']) register_Ns3ThreeGppChannelConditionModel_methods(root_module, root_module['ns3::ThreeGppChannelConditionModel']) register_Ns3ThreeGppIndoorMixedOfficeChannelConditionModel_methods(root_module, root_module['ns3::ThreeGppIndoorMixedOfficeChannelConditionModel']) register_Ns3ThreeGppIndoorOfficePropagationLossModel_methods(root_module, root_module['ns3::ThreeGppIndoorOfficePropagationLossModel']) register_Ns3ThreeGppIndoorOpenOfficeChannelConditionModel_methods(root_module, root_module['ns3::ThreeGppIndoorOpenOfficeChannelConditionModel']) register_Ns3ThreeGppRmaChannelConditionModel_methods(root_module, root_module['ns3::ThreeGppRmaChannelConditionModel']) register_Ns3ThreeGppUmaChannelConditionModel_methods(root_module, root_module['ns3::ThreeGppUmaChannelConditionModel']) register_Ns3ThreeGppUmiStreetCanyonChannelConditionModel_methods(root_module, root_module['ns3::ThreeGppUmiStreetCanyonChannelConditionModel']) register_Ns3ThreeGppV2vHighwayPropagationLossModel_methods(root_module, root_module['ns3::ThreeGppV2vHighwayPropagationLossModel']) register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue']) register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker']) register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue']) register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker']) register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue']) register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker']) register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue']) register_Ns3AlwaysLosChannelConditionModel_methods(root_module, root_module['ns3::AlwaysLosChannelConditionModel']) 
    # Tail of the module-wide registration dispatcher: hook up the remaining
    # CallbackImpl template instantiations, the probabilistic V2V channel
    # condition models and the ns3::Hash helpers.
    # NOTE: pybindgen-generated code -- regenerate the bindings, do not hand-edit.
    register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3MobilityModel__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, ns3::Ptr<const ns3::MobilityModel>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3ProbabilisticV2vHighwayChannelConditionModel_methods(root_module, root_module['ns3::ProbabilisticV2vHighwayChannelConditionModel'])
    register_Ns3ProbabilisticV2vUrbanChannelConditionModel_methods(root_module, root_module['ns3::ProbabilisticV2vUrbanChannelConditionModel'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return

# Bind ns3::AttributeConstructionList: constructors plus Add/Begin/End/Find.
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add', 'void', [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin', 'ns3::AttributeConstructionList::CIterator', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End', 'ns3::AttributeConstructionList::CIterator', [], is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('Find', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True)
    return

# Bind the nested ns3::AttributeConstructionList::Item record (checker/name/value fields).
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return

# Bind ns3::CallbackBase: public constructors/GetImpl plus the protected impl constructor.
def register_Ns3CallbackBase_methods(root_module, cls):
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl', 'ns3::Ptr< ns3::CallbackImplBase >', [], is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')], visibility='protected')
    return

# Bind the DefaultDeleter<ns3::AttributeAccessor> specialization.
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeAccessor> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeAccessor>::Delete(ns3::AttributeAccessor * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::AttributeAccessor *', 'object')], is_static=True)
    return

# Bind the DefaultDeleter<ns3::AttributeChecker> specialization.
def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeChecker> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeChecker > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeChecker>::Delete(ns3::AttributeChecker * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::AttributeChecker *', 'object')], is_static=True)
    return

# Bind the DefaultDeleter<ns3::AttributeValue> specialization.
def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeValue> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeValue > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeValue>::Delete(ns3::AttributeValue * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::AttributeValue *', 'object')], is_static=True)
    return

# Bind the DefaultDeleter<ns3::CallbackImplBase> specialization.
def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase>::DefaultDeleter(ns3::DefaultDeleter<ns3::CallbackImplBase> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::CallbackImplBase > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::CallbackImplBase>::Delete(ns3::CallbackImplBase * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::CallbackImplBase *', 'object')], is_static=True)
    return

# Bind the DefaultDeleter<ns3::EventImpl> specialization (continues in the next chunk of this file).
def register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, cls):
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl>::DefaultDeleter(ns3::DefaultDeleter<ns3::EventImpl> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::EventImpl > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::EventImpl>::Delete(ns3::EventImpl * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::EventImpl *', 'object')], is_static=True)
    return

# Bind the DefaultDeleter<ns3::Hash::Implementation> specialization.
# NOTE: pybindgen-generated code -- regenerate the bindings, do not hand-edit.
def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation>::DefaultDeleter(ns3::DefaultDeleter<ns3::Hash::Implementation> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::Hash::Implementation > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::Hash::Implementation>::Delete(ns3::Hash::Implementation * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Hash::Implementation *', 'object')], is_static=True)
    return

# Bind the DefaultDeleter<ns3::TraceSourceAccessor> specialization.
def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor>::DefaultDeleter(ns3::DefaultDeleter<ns3::TraceSourceAccessor> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::TraceSourceAccessor > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::TraceSourceAccessor>::Delete(ns3::TraceSourceAccessor * object) [member function]
    cls.add_method('Delete', 'void',
                   [param('ns3::TraceSourceAccessor *', 'object')],
                   is_static=True)
    return

# Bind ns3::EventId: value-comparable event handle with cancel/query operations.
def register_Ns3EventId_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel', 'void', [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs', 'uint64_t', [], is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid', 'uint32_t', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning', 'bool', [], is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl', 'ns3::EventImpl *', [], is_const=True)
    ## event-id.h (module 'core'): void ns3::EventId::Remove() [member function]
    cls.add_method('Remove', 'void', [])
    return

# Bind ns3::Hasher: 32/64-bit hashing over raw buffers and std::string.
def register_Ns3Hasher_methods(root_module, cls):
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32', 'uint32_t', [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64', 'uint64_t', [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear', 'ns3::Hasher &', [])
    return

# Bind ns3::LogComponent: per-component log-level control (continues in the next chunk of this file).
def register_Ns3LogComponent_methods(root_module, cls):
    ## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [constructor]
    cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
    ## log.h (module 'core'): ns3::LogComponent::LogComponent(std::string const & name, std::string const & file, ns3::LogLevel const mask=::ns3::LogLevel::LOG_NONE) [constructor]
    cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'file'), param('ns3::LogLevel const', 'mask', default_value='::ns3::LogLevel::LOG_NONE')])
    ## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel const level) [member function]
    cls.add_method('Disable', 'void',
                   [param('ns3::LogLevel const', 'level')])
    ## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel const level) [member function]
    cls.add_method('Enable', 'void', [param('ns3::LogLevel const', 'level')])
    ## log.h (module 'core'): std::string ns3::LogComponent::File() const [member function]
    cls.add_method('File', 'std::string', [], is_const=True)
    ## log.h (module 'core'): static ns3::LogComponent::ComponentList * ns3::LogComponent::GetComponentList() [member function]
    cls.add_method('GetComponentList', 'ns3::LogComponent::ComponentList *', [], is_static=True)
    ## log.h (module 'core'): static std::string ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) [member function]
    cls.add_method('GetLevelLabel', 'std::string', [param('ns3::LogLevel const', 'level')], is_static=True)
    ## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel const level) const [member function]
    cls.add_method('IsEnabled', 'bool', [param('ns3::LogLevel const', 'level')], is_const=True)
    ## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function]
    cls.add_method('IsNoneEnabled', 'bool', [], is_const=True)
    ## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function]
    cls.add_method('Name', 'char const *', [], is_const=True)
    ## log.h (module 'core'): void ns3::LogComponent::SetMask(ns3::LogLevel const level) [member function]
    cls.add_method('SetMask', 'void', [param('ns3::LogLevel const', 'level')])
    return

# Bind ns3::ObjectBase: attribute get/set and trace connect/disconnect API,
# plus the protected construction hooks.
# NOTE: pybindgen-generated code -- regenerate the bindings, do not hand-edit.
def register_Ns3ObjectBase_methods(root_module, cls):
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')], is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_pure_virtual=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute', 'void', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe', 'bool', [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect', 'bool', [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext', 'bool', [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf', 'void', [param('ns3::AttributeConstructionList const &', 'attributes')], visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted', 'void', [], is_virtual=True, visibility='protected')
    return

# Bind ns3::ObjectDeleter: static Delete() used by SimpleRefCount<Object, ...>.
def register_Ns3ObjectDeleter_methods(root_module, cls):
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [constructor]
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    ## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return

# Bind ns3::ParameterLogger (log.h helper that streams parameters to an ostream).
def register_Ns3ParameterLogger_methods(root_module, cls):
    ## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(ns3::ParameterLogger const & arg0) [constructor]
    cls.add_constructor([param('ns3::ParameterLogger const &', 'arg0')])
    ## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(std::ostream & os) [constructor]
    cls.add_constructor([param('std::ostream &', 'os')])
    return

# Bind the PropagationCache<ns3::JakesProcess> template instantiation
# (per-path data cache keyed by mobility-model pair and model UID).
def register_Ns3PropagationCache__Ns3JakesProcess_methods(root_module, cls):
    ## propagation-cache.h (module 'propagation'): ns3::PropagationCache<ns3::JakesProcess>::PropagationCache(ns3::PropagationCache<ns3::JakesProcess> const & arg0) [constructor]
    cls.add_constructor([param('ns3::PropagationCache< ns3::JakesProcess > const &', 'arg0')])
    ## propagation-cache.h (module 'propagation'): ns3::PropagationCache<ns3::JakesProcess>::PropagationCache() [constructor]
    cls.add_constructor([])
    ## propagation-cache.h (module 'propagation'): void ns3::PropagationCache<ns3::JakesProcess>::AddPathData(ns3::Ptr<ns3::JakesProcess> data, ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b, uint32_t modelUid) [member function]
    cls.add_method('AddPathData', 'void', [param('ns3::Ptr< ns3::JakesProcess >', 'data'), param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b'), param('uint32_t', 'modelUid')])
    ## propagation-cache.h (module 'propagation'): ns3::Ptr<ns3::JakesProcess> ns3::PropagationCache<ns3::JakesProcess>::GetPathData(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b, uint32_t modelUid) [member function]
    cls.add_method('GetPathData', 'ns3::Ptr< ns3::JakesProcess >', [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b'), param('uint32_t', 'modelUid')])
    return

# Bind the SimpleRefCount<Object, ObjectBase, ObjectDeleter> instantiation.
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    return

# Bind ns3::Time: comparison/arithmetic operators, constructors and unit
# conversion accessors (the registration continues in the next chunk of this file).
def register_Ns3Time_methods(root_module, cls):
    cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('>=') cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::Time'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right')) cls.add_output_stream_operator() ## nstime.h (module 'core'): ns3::Time::Time() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [constructor] cls.add_constructor([param('ns3::Time const &', 'o')]) ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor] cls.add_constructor([param('double', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor] cls.add_constructor([param('int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor] cls.add_constructor([param('long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor] cls.add_constructor([param('long long int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor] cls.add_constructor([param('unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor] cls.add_constructor([param('long unsigned int', 'v')]) ## nstime.h (module 
'core'): ns3::Time::Time(long long unsigned int v) [constructor] cls.add_constructor([param('long long unsigned int', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'v')]) ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor] cls.add_constructor([param('std::string const &', 's')]) ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit=::ns3::Time::Unit::AUTO) const [member function] cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit', default_value='::ns3::Time::Unit::AUTO')], is_const=True) ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function] cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function] cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function] cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function] cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function] cls.add_method('GetDays', 'double', [], is_const=True) ## nstime.h (module 'core'): 
double ns3::Time::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function] cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function] cls.add_method('GetHours', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function] cls.add_method('GetInteger', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function] cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function] cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function] cls.add_method('GetMinutes', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function] cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function] cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function] cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True) ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function] cls.add_method('GetSeconds', 'double', [], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function] cls.add_method('GetTimeStep', 'int64_t', [], is_const=True) ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function] cls.add_method('GetYears', 'double', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function] 
cls.add_method('IsNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function] cls.add_method('IsPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function] cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function] cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True) ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function] cls.add_method('IsZero', 'bool', [], is_const=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function] cls.add_method('Max', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function] cls.add_method('Min', 'ns3::Time', [], is_static=True) ## nstime.h (module 'core'): ns3::Time ns3::Time::RoundTo(ns3::Time::Unit unit) const [member function] cls.add_method('RoundTo', 'ns3::Time', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function] cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True) ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function] cls.add_method('StaticInit', 'bool', [], is_static=True) ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function] cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function] cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True) ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function] cls.add_method('ToInteger', 'int64_t', 
[param('ns3::Time::Unit', 'unit')], is_const=True) return def register_Ns3TimeWithUnit_methods(root_module, cls): cls.add_output_stream_operator() ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [constructor] cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')]) ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor] cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')]) return def register_Ns3TypeId_methods(root_module, cls): cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('!=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('<') ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor] cls.add_constructor([param('char const *', 'name')]) ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'o')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId 
ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddAttribute', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function] cls.add_method('AddTraceSource', 'ns3::TypeId', [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(std::size_t i) const [member function] cls.add_method('GetAttribute', 'ns3::TypeId::AttributeInformation', [param('std::size_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(std::size_t i) const [member function] cls.add_method('GetAttributeFullName', 'std::string', [param('std::size_t', 'i')], is_const=True) ## type-id.h 
(module 'core'): std::size_t ns3::TypeId::GetAttributeN() const [member function] cls.add_method('GetAttributeN', 'std::size_t', [], is_const=True) ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::TypeId::GetConstructor() const [member function] cls.add_method('GetConstructor', 'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function] cls.add_method('GetGroupName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId::hash_t ns3::TypeId::GetHash() const [member function] cls.add_method('GetHash', 'ns3::TypeId::hash_t', [], is_const=True) ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function] cls.add_method('GetName', 'std::string', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function] cls.add_method('GetParent', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint16_t i) [member function] cls.add_method('GetRegistered', 'ns3::TypeId', [param('uint16_t', 'i')], is_static=True) ## type-id.h (module 'core'): static uint16_t ns3::TypeId::GetRegisteredN() [member function] cls.add_method('GetRegisteredN', 'uint16_t', [], is_static=True) ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function] cls.add_method('GetSize', 'std::size_t', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(std::size_t i) const [member function] cls.add_method('GetTraceSource', 'ns3::TypeId::TraceSourceInformation', [param('std::size_t', 'i')], is_const=True) ## type-id.h (module 'core'): std::size_t 
ns3::TypeId::GetTraceSourceN() const [member function] cls.add_method('GetTraceSourceN', 'std::size_t', [], is_const=True) ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function] cls.add_method('GetUid', 'uint16_t', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function] cls.add_method('HasConstructor', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function] cls.add_method('HasParent', 'bool', [], is_const=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function] cls.add_method('HideFromDocumentation', 'ns3::TypeId', []) ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function] cls.add_method('IsChildOf', 'bool', [param('ns3::TypeId', 'other')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function] cls.add_method('LookupAttributeByName', 'bool', [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], is_const=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(ns3::TypeId::hash_t hash) [member function] cls.add_method('LookupByHash', 'ns3::TypeId', [param('uint32_t', 'hash')], is_static=True) ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(ns3::TypeId::hash_t hash, ns3::TypeId * tid) [member function] cls.add_method('LookupByHashFailSafe', 'bool', [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], is_static=True) ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function] cls.add_method('LookupByName', 'ns3::TypeId', [param('std::string', 'name')], is_static=True) ## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> 
ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name')], is_const=True) ## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function] cls.add_method('LookupTraceSourceByName', 'ns3::Ptr< ns3::TraceSourceAccessor const >', [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function] cls.add_method('MustHideFromDocumentation', 'bool', [], is_const=True) ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(std::size_t i, ns3::Ptr<const ns3::AttributeValue> initialValue) [member function] cls.add_method('SetAttributeInitialValue', 'bool', [param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function] cls.add_method('SetGroupName', 'ns3::TypeId', [param('std::string', 'groupName')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function] cls.add_method('SetParent', 'ns3::TypeId', [param('ns3::TypeId', 'tid')]) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function] cls.add_method('SetSize', 'ns3::TypeId', [param('std::size_t', 'size')]) ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function] cls.add_method('SetUid', 'void', [param('uint16_t', 'uid')]) return def register_Ns3TypeIdAttributeInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): 
ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [constructor] cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable] cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False) cls.add_instance_attribute('flags', 'uint32_t', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable] cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable] cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable] cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable] cls.add_instance_attribute('supportMsg', 'std::string', is_const=False) return def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & 
arg0) [constructor] cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')]) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable] cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable] cls.add_instance_attribute('callback', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable] cls.add_instance_attribute('help', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable] cls.add_instance_attribute('name', 'std::string', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable] cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False) ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable] cls.add_instance_attribute('supportMsg', 'std::string', is_const=False) return def register_Ns3Vector2D_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('<') cls.add_binary_numeric_operator('-', root_module['ns3::Vector2D'], root_module['ns3::Vector2D'], param('ns3::Vector2D const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::Vector2D'], root_module['ns3::Vector2D'], param('ns3::Vector2D const &', 'right')) cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('<=') ## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [constructor] cls.add_constructor([param('ns3::Vector2D const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor] cls.add_constructor([param('double', '_x'), 
param('double', '_y')]) ## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): double ns3::Vector2D::GetLength() const [member function] cls.add_method('GetLength', 'double', [], is_const=True) ## vector.h (module 'core'): ns3::Vector2D::x [variable] cls.add_instance_attribute('x', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector2D::y [variable] cls.add_instance_attribute('y', 'double', is_const=False) return def register_Ns3Vector3D_methods(root_module, cls): cls.add_output_stream_operator() cls.add_binary_comparison_operator('<') cls.add_binary_numeric_operator('-', root_module['ns3::Vector3D'], root_module['ns3::Vector3D'], param('ns3::Vector3D const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::Vector3D'], root_module['ns3::Vector3D'], param('ns3::Vector3D const &', 'right')) cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('>=') cls.add_binary_comparison_operator('>') cls.add_binary_comparison_operator('<=') ## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [constructor] cls.add_constructor([param('ns3::Vector3D const &', 'arg0')]) ## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor] cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')]) ## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): double ns3::Vector3D::GetLength() const [member function] cls.add_method('GetLength', 'double', [], is_const=True) ## vector.h (module 'core'): ns3::Vector3D::x [variable] cls.add_instance_attribute('x', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector3D::y [variable] cls.add_instance_attribute('y', 'double', is_const=False) ## vector.h (module 'core'): ns3::Vector3D::z [variable] 
cls.add_instance_attribute('z', 'double', is_const=False) return def register_Ns3Empty_methods(root_module, cls): ## empty.h (module 'core'): ns3::empty::empty() [constructor] cls.add_constructor([]) ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [constructor] cls.add_constructor([param('ns3::empty const &', 'arg0')]) return def register_Ns3Int64x64_t_methods(root_module, cls): cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::int64x64_t'], param('ns3::Time const &', 'right')) cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right')) cls.add_binary_comparison_operator('!=') cls.add_binary_comparison_operator('<=') cls.add_binary_comparison_operator('>=') cls.add_output_stream_operator() cls.add_binary_comparison_operator('==') cls.add_binary_comparison_operator('<') cls.add_binary_comparison_operator('>') cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right')) cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right')) cls.add_unary_numeric_operator('-') ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor] cls.add_constructor([]) ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(double const value) [constructor] cls.add_constructor([param('double const', 
'value')]) ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long double const value) [constructor] cls.add_constructor([param('long double const', 'value')]) ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int const v) [constructor] cls.add_constructor([param('int const', 'v')]) ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long int const v) [constructor] cls.add_constructor([param('long int const', 'v')]) ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int const v) [constructor] cls.add_constructor([param('long long int const', 'v')]) ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int const v) [constructor] cls.add_constructor([param('unsigned int const', 'v')]) ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int const v) [constructor] cls.add_constructor([param('long unsigned int const', 'v')]) ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int const v) [constructor] cls.add_constructor([param('long long unsigned int const', 'v')]) ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t const hi, uint64_t const lo) [constructor] cls.add_constructor([param('int64_t const', 'hi'), param('uint64_t const', 'lo')]) ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [constructor] cls.add_constructor([param('ns3::int64x64_t const &', 'o')]) ## int64x64-128.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function] cls.add_method('GetDouble', 'double', [], is_const=True) ## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function] cls.add_method('GetHigh', 'int64_t', [], is_const=True) ## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetInt() const [member function] cls.add_method('GetInt', 'int64_t', [], is_const=True) ## int64x64-128.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const 
[member function] cls.add_method('GetLow', 'uint64_t', [], is_const=True) ## int64x64-128.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t const v) [member function] cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t const', 'v')], is_static=True) ## int64x64-128.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function] cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')]) ## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::Round() const [member function] cls.add_method('Round', 'int64_t', [], is_const=True) ## int64x64-128.h (module 'core'): ns3::int64x64_t::implementation [variable] cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True) return def register_Ns3Object_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::Object() [constructor] cls.add_constructor([]) ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function] cls.add_method('AggregateObject', 'void', [param('ns3::Ptr< ns3::Object >', 'other')]) ## object.h (module 'core'): void ns3::Object::Dispose() [member function] cls.add_method('Dispose', 'void', []) ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function] cls.add_method('GetAggregateIterator', 'ns3::Object::AggregateIterator', [], is_const=True) ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function] cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True) ## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject() const [member function] cls.add_method('GetObject', 'ns3::Ptr< ns3::Object >', [], custom_template_method_name='GetObject', is_const=True, template_parameters=['ns3::Object']) ## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject(ns3::TypeId tid) const 
[member function] cls.add_method('GetObject', 'ns3::Ptr< ns3::Object >', [param('ns3::TypeId', 'tid')], custom_template_method_name='GetObject', is_const=True, template_parameters=['ns3::Object']) ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## object.h (module 'core'): void ns3::Object::Initialize() [member function] cls.add_method('Initialize', 'void', []) ## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function] cls.add_method('IsInitialized', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [constructor] cls.add_constructor([param('ns3::Object const &', 'o')], visibility='protected') ## object.h (module 'core'): void ns3::Object::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], is_virtual=True, visibility='protected') ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function] cls.add_method('DoInitialize', 'void', [], is_virtual=True, visibility='protected') ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function] cls.add_method('NotifyNewAggregate', 'void', [], is_virtual=True, visibility='protected') return def register_Ns3ObjectAggregateIterator_methods(root_module, cls): ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [constructor] cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')]) ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor] cls.add_constructor([]) ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function] cls.add_method('HasNext', 'bool', [], is_const=True) ## object.h (module 'core'): ns3::Ptr<const ns3::Object> ns3::Object::AggregateIterator::Next() [member function] cls.add_method('Next', 'ns3::Ptr< 
ns3::Object const >', []) return def register_Ns3PropagationDelayModel_methods(root_module, cls): ## propagation-delay-model.h (module 'propagation'): ns3::PropagationDelayModel::PropagationDelayModel() [constructor] cls.add_constructor([]) ## propagation-delay-model.h (module 'propagation'): ns3::PropagationDelayModel::PropagationDelayModel(ns3::PropagationDelayModel const & arg0) [constructor] cls.add_constructor([param('ns3::PropagationDelayModel const &', 'arg0')]) ## propagation-delay-model.h (module 'propagation'): int64_t ns3::PropagationDelayModel::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')]) ## propagation-delay-model.h (module 'propagation'): ns3::Time ns3::PropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetDelay', 'ns3::Time', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_pure_virtual=True, is_virtual=True) ## propagation-delay-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationDelayModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-delay-model.h (module 'propagation'): int64_t ns3::PropagationDelayModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, is_virtual=True, visibility='private') return def register_Ns3PropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel::PropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void 
ns3::PropagationLossModel::SetNext(ns3::Ptr<ns3::PropagationLossModel> next) [member function] cls.add_method('SetNext', 'void', [param('ns3::Ptr< ns3::PropagationLossModel >', 'next')]) ## propagation-loss-model.h (module 'propagation'): ns3::Ptr<ns3::PropagationLossModel> ns3::PropagationLossModel::GetNext() [member function] cls.add_method('GetNext', 'ns3::Ptr< ns3::PropagationLossModel >', []) ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::CalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('CalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True) ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')]) ## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_pure_virtual=True, is_virtual=True, visibility='private') ## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, is_virtual=True, visibility='private') return def register_Ns3RandomPropagationDelayModel_methods(root_module, cls): ## propagation-delay-model.h (module 'propagation'): ns3::RandomPropagationDelayModel::RandomPropagationDelayModel(ns3::RandomPropagationDelayModel const & arg0) [constructor] 
cls.add_constructor([param('ns3::RandomPropagationDelayModel const &', 'arg0')]) ## propagation-delay-model.h (module 'propagation'): ns3::RandomPropagationDelayModel::RandomPropagationDelayModel() [constructor] cls.add_constructor([]) ## propagation-delay-model.h (module 'propagation'): ns3::Time ns3::RandomPropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetDelay', 'ns3::Time', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True) ## propagation-delay-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationDelayModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-delay-model.h (module 'propagation'): int64_t ns3::RandomPropagationDelayModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True, visibility='private') return def register_Ns3RandomPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel::RandomPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::RandomPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True, visibility='private') ## propagation-loss-model.h (module 'propagation'): int64_t 
ns3::RandomPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True, visibility='private') return def register_Ns3RandomVariableStream_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function] cls.add_method('SetStream', 'void', [param('int64_t', 'stream')]) ## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function] cls.add_method('GetStream', 'int64_t', [], is_const=True) ## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function] cls.add_method('SetAntithetic', 'void', [param('bool', 'isAntithetic')]) ## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function] cls.add_method('IsAntithetic', 'bool', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_pure_virtual=True, is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_pure_virtual=True, is_virtual=True) ## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function] cls.add_method('Peek', 'ns3::RngStream *', [], is_const=True, visibility='protected') return def register_Ns3RangePropagationLossModel_methods(root_module, cls): ## 
propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RangePropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel::RangePropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::RangePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True, visibility='private') ## propagation-loss-model.h (module 'propagation'): int64_t ns3::RangePropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True, visibility='private') return def register_Ns3SequentialRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function] cls.add_method('GetIncrement', 'ns3::Ptr< ns3::RandomVariableStream >', 
[], is_const=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function] cls.add_method('GetConsecutive', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')]) return def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [constructor] 
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')]) return def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')]) return def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')]) return def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h 
(module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')]) return def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')]) return def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls): ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor] cls.add_constructor([]) ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [constructor] cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')]) return def 
register_Ns3ThreeGppPropagationLossModel_methods(root_module, cls): ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppPropagationLossModel::ThreeGppPropagationLossModel() [constructor] cls.add_constructor([]) ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::Ptr<ns3::ChannelConditionModel> ns3::ThreeGppPropagationLossModel::GetChannelConditionModel() const [member function] cls.add_method('GetChannelConditionModel', 'ns3::Ptr< ns3::ChannelConditionModel >', [], is_const=True) ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppPropagationLossModel::GetFrequency() const [member function] cls.add_method('GetFrequency', 'double', [], is_const=True) ## three-gpp-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## three-gpp-propagation-loss-model.h (module 'propagation'): void ns3::ThreeGppPropagationLossModel::SetChannelConditionModel(ns3::Ptr<ns3::ChannelConditionModel> model) [member function] cls.add_method('SetChannelConditionModel', 'void', [param('ns3::Ptr< ns3::ChannelConditionModel >', 'model')]) ## three-gpp-propagation-loss-model.h (module 'propagation'): void ns3::ThreeGppPropagationLossModel::SetFrequency(double f) [member function] cls.add_method('SetFrequency', 'void', [param('double', 'f')]) ## three-gpp-propagation-loss-model.h (module 'propagation'): static double ns3::ThreeGppPropagationLossModel::Calculate2dDistance(ns3::Vector a, ns3::Vector b) [member function] cls.add_method('Calculate2dDistance', 'double', [param('ns3::Vector3D', 'a'), param('ns3::Vector3D', 'b')], is_static=True, visibility='protected') ## three-gpp-propagation-loss-model.h (module 'propagation'): void ns3::ThreeGppPropagationLossModel::DoDispose() [member function] cls.add_method('DoDispose', 'void', [], is_virtual=True, visibility='protected') ## 
three-gpp-propagation-loss-model.h (module 'propagation'): int64_t ns3::ThreeGppPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppPropagationLossModel::GetLossLos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossLos', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_pure_virtual=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppPropagationLossModel::GetLossNlos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossNlos', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_pure_virtual=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppPropagationLossModel::GetLossNlosv(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossNlosv', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_virtual=True, visibility='private') ## 
three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppPropagationLossModel::GetShadowingCorrelationDistance(ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingCorrelationDistance', 'double', [param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_pure_virtual=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppPropagationLossModel::GetShadowingStd(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingStd', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_pure_virtual=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): std::pair<double, double> ns3::ThreeGppPropagationLossModel::GetUtAndBsHeights(double za, double zb) const [member function] cls.add_method('GetUtAndBsHeights', 'std::pair< double, double >', [param('double', 'za'), param('double', 'zb')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3ThreeGppRmaPropagationLossModel_methods(root_module, cls): ## three-gpp-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppRmaPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppRmaPropagationLossModel::ThreeGppRmaPropagationLossModel() [constructor] cls.add_constructor([]) ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppRmaPropagationLossModel::GetLossLos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossLos', 'double', 
[param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppRmaPropagationLossModel::GetLossNlos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossNlos', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppRmaPropagationLossModel::GetShadowingStd(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingStd', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppRmaPropagationLossModel::GetShadowingCorrelationDistance(ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingCorrelationDistance', 'double', [param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3ThreeGppUmaPropagationLossModel_methods(root_module, cls): ## three-gpp-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppUmaPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppUmaPropagationLossModel::ThreeGppUmaPropagationLossModel() [constructor] cls.add_constructor([]) ## three-gpp-propagation-loss-model.h (module 'propagation'): 
int64_t ns3::ThreeGppUmaPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppUmaPropagationLossModel::GetLossLos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossLos', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppUmaPropagationLossModel::GetLossNlos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossNlos', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppUmaPropagationLossModel::GetShadowingStd(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingStd', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppUmaPropagationLossModel::GetShadowingCorrelationDistance(ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingCorrelationDistance', 'double', [param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_virtual=True, visibility='private') return def 
register_Ns3ThreeGppUmiStreetCanyonPropagationLossModel_methods(root_module, cls): ## three-gpp-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppUmiStreetCanyonPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppUmiStreetCanyonPropagationLossModel::ThreeGppUmiStreetCanyonPropagationLossModel() [constructor] cls.add_constructor([]) ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppUmiStreetCanyonPropagationLossModel::GetLossLos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossLos', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppUmiStreetCanyonPropagationLossModel::GetLossNlos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossNlos', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppUmiStreetCanyonPropagationLossModel::GetShadowingStd(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingStd', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): double 
ns3::ThreeGppUmiStreetCanyonPropagationLossModel::GetShadowingCorrelationDistance(ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingCorrelationDistance', 'double', [param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-propagation-loss-model.h (module 'propagation'): std::pair<double, double> ns3::ThreeGppUmiStreetCanyonPropagationLossModel::GetUtAndBsHeights(double za, double zb) const [member function] cls.add_method('GetUtAndBsHeights', 'std::pair< double, double >', [param('double', 'za'), param('double', 'zb')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3ThreeGppV2vUrbanPropagationLossModel_methods(root_module, cls): ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppV2vUrbanPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppV2vUrbanPropagationLossModel::ThreeGppV2vUrbanPropagationLossModel() [constructor] cls.add_constructor([]) ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppV2vUrbanPropagationLossModel::GetLossLos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossLos', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppV2vUrbanPropagationLossModel::GetLossNlosv(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossNlosv', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, 
is_virtual=True, visibility='private') ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppV2vUrbanPropagationLossModel::GetLossNlos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossNlos', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppV2vUrbanPropagationLossModel::GetShadowingStd(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingStd', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_virtual=True, visibility='private') ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppV2vUrbanPropagationLossModel::GetShadowingCorrelationDistance(ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingCorrelationDistance', 'double', [param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeLogDistancePropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel::ThreeLogDistancePropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): double ns3::ThreeLogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, 
ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True, visibility='private') ## propagation-loss-model.h (module 'propagation'): int64_t ns3::ThreeLogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True, visibility='private') return def register_Ns3TraceSourceAccessor_methods(root_module, cls): ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [constructor] cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')]) ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor] cls.add_constructor([]) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_const=True, is_pure_virtual=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_const=True, is_pure_virtual=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function] cls.add_method('Disconnect', 
'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_const=True, is_pure_virtual=True, is_virtual=True) ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function] cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_const=True, is_pure_virtual=True, is_virtual=True) return def register_Ns3TriangularRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function] cls.add_method('GetMean', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function] cls.add_method('GetValue', 'double', [param('double', 'mean'), param('double', 'min'), param('double', 'max')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'mean'), 
param('uint32_t', 'min'), param('uint32_t', 'max')]) ## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls): ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::TwoRayGroundPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel::TwoRayGroundPropagationLossModel() [constructor] cls.add_constructor([]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetFrequency(double frequency) [member function] cls.add_method('SetFrequency', 'void', [param('double', 'frequency')]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetSystemLoss(double systemLoss) [member function] cls.add_method('SetSystemLoss', 'void', [param('double', 'systemLoss')]) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetMinDistance(double minDistance) [member function] cls.add_method('SetMinDistance', 'void', [param('double', 'minDistance')]) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetMinDistance() const [member function] cls.add_method('GetMinDistance', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetFrequency() const [member function] cls.add_method('GetFrequency', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): double 
ns3::TwoRayGroundPropagationLossModel::GetSystemLoss() const [member function] cls.add_method('GetSystemLoss', 'double', [], is_const=True) ## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetHeightAboveZ(double heightAboveZ) [member function] cls.add_method('SetHeightAboveZ', 'void', [param('double', 'heightAboveZ')]) ## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True, visibility='private') ## propagation-loss-model.h (module 'propagation'): int64_t ns3::TwoRayGroundPropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True, visibility='private') return def register_Ns3UniformRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function] cls.add_method('GetMin', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function] cls.add_method('GetMax', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function] cls.add_method('GetValue', 'double', [param('double', 'min'), 
param('double', 'max')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'min'), param('uint32_t', 'max')]) ## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3WeibullRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function] cls.add_method('GetScale', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function] cls.add_method('GetShape', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function] cls.add_method('GetBound', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function] cls.add_method('GetValue', 'double', [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 
'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')]) ## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ZetaRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function] cls.add_method('GetValue', 'double', [param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ZipfRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): 
ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function] cls.add_method('GetN', 'uint32_t', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function] cls.add_method('GetAlpha', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function] cls.add_method('GetValue', 'double', [param('uint32_t', 'n'), param('double', 'alpha')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'n'), param('uint32_t', 'alpha')]) ## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3AttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [constructor] cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_const=True, is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member 
function] cls.add_method('HasGetter', 'bool', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_const=True, is_pure_virtual=True, is_virtual=True) return def register_Ns3AttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [constructor] cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_const=True, is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function] cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_const=True, is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function] cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const 
[member function] cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function] cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function] cls.add_method('GetValueTypeName', 'std::string', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function] cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_const=True, is_pure_virtual=True, is_virtual=True) return def register_Ns3AttributeValue_methods(root_module, cls): ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [constructor] cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')]) ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True) ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< 
ns3::AttributeChecker const >', 'checker')], is_const=True, is_pure_virtual=True, is_virtual=True) return def register_Ns3CallbackChecker_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [constructor] cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')]) return def register_Ns3CallbackImplBase_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [constructor] cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')]) ## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function] cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_pure_virtual=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_const=True, is_pure_virtual=True, is_virtual=True) ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function] cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function] cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, template_parameters=['ns3::ObjectBase*'], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function] cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, 
template_parameters=['void'], visibility='protected') ## callback.h (module 'core'): static std::string ns3::CallbackImplBase::GetCppTypeid() [member function] cls.add_method('GetCppTypeid', 'std::string', [], is_static=True, template_parameters=['ns3::Ptr<ns3::MobilityModel const> '], visibility='protected') return def register_Ns3CallbackValue_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [constructor] cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor] cls.add_constructor([param('ns3::CallbackBase const &', 'base')]) ## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function] cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')]) return def register_Ns3ChannelCondition_methods(root_module, cls): ## channel-condition-model.h (module 'propagation'): 
ns3::ChannelCondition::ChannelCondition(ns3::ChannelCondition const & arg0) [constructor] cls.add_constructor([param('ns3::ChannelCondition const &', 'arg0')]) ## channel-condition-model.h (module 'propagation'): ns3::ChannelCondition::ChannelCondition() [constructor] cls.add_constructor([]) ## channel-condition-model.h (module 'propagation'): ns3::ChannelCondition::ChannelCondition(ns3::ChannelCondition::LosConditionValue losCondition, ns3::ChannelCondition::O2iConditionValue o2iCondition=::ns3::ChannelCondition::O2iConditionValue::O2O) [constructor] cls.add_constructor([param('ns3::ChannelCondition::LosConditionValue', 'losCondition'), param('ns3::ChannelCondition::O2iConditionValue', 'o2iCondition', default_value='::ns3::ChannelCondition::O2iConditionValue::O2O')]) ## channel-condition-model.h (module 'propagation'): ns3::ChannelCondition::LosConditionValue ns3::ChannelCondition::GetLosCondition() const [member function] cls.add_method('GetLosCondition', 'ns3::ChannelCondition::LosConditionValue', [], is_const=True) ## channel-condition-model.h (module 'propagation'): ns3::ChannelCondition::O2iConditionValue ns3::ChannelCondition::GetO2iCondition() const [member function] cls.add_method('GetO2iCondition', 'ns3::ChannelCondition::O2iConditionValue', [], is_const=True) ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::ChannelCondition::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## channel-condition-model.h (module 'propagation'): bool ns3::ChannelCondition::IsEqual(ns3::Ptr<const ns3::ChannelCondition> otherCondition) const [member function] cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::ChannelCondition const >', 'otherCondition')], is_const=True) ## channel-condition-model.h (module 'propagation'): bool ns3::ChannelCondition::IsI2i() const [member function] cls.add_method('IsI2i', 'bool', [], is_const=True) ## channel-condition-model.h (module 'propagation'): bool 
ns3::ChannelCondition::IsLos() const [member function] cls.add_method('IsLos', 'bool', [], is_const=True) ## channel-condition-model.h (module 'propagation'): bool ns3::ChannelCondition::IsNlos() const [member function] cls.add_method('IsNlos', 'bool', [], is_const=True) ## channel-condition-model.h (module 'propagation'): bool ns3::ChannelCondition::IsNlosv() const [member function] cls.add_method('IsNlosv', 'bool', [], is_const=True) ## channel-condition-model.h (module 'propagation'): bool ns3::ChannelCondition::IsO2i() const [member function] cls.add_method('IsO2i', 'bool', [], is_const=True) ## channel-condition-model.h (module 'propagation'): bool ns3::ChannelCondition::IsO2o() const [member function] cls.add_method('IsO2o', 'bool', [], is_const=True) ## channel-condition-model.h (module 'propagation'): void ns3::ChannelCondition::SetLosCondition(ns3::ChannelCondition::LosConditionValue losCondition) [member function] cls.add_method('SetLosCondition', 'void', [param('ns3::ChannelCondition::LosConditionValue', 'losCondition')]) ## channel-condition-model.h (module 'propagation'): void ns3::ChannelCondition::SetO2iCondition(ns3::ChannelCondition::O2iConditionValue o2iCondition) [member function] cls.add_method('SetO2iCondition', 'void', [param('ns3::ChannelCondition::O2iConditionValue', 'o2iCondition')]) return def register_Ns3ChannelConditionModel_methods(root_module, cls): ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::ChannelConditionModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## channel-condition-model.h (module 'propagation'): ns3::ChannelConditionModel::ChannelConditionModel() [constructor] cls.add_constructor([]) ## channel-condition-model.h (module 'propagation'): ns3::Ptr<ns3::ChannelCondition> ns3::ChannelConditionModel::GetChannelCondition(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function] 
cls.add_method('GetChannelCondition', 'ns3::Ptr< ns3::ChannelCondition >', [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, is_pure_virtual=True, is_virtual=True) ## channel-condition-model.h (module 'propagation'): int64_t ns3::ChannelConditionModel::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_pure_virtual=True, is_virtual=True) return def register_Ns3ConstantRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function] cls.add_method('GetConstant', 'double', [], is_const=True) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function] cls.add_method('GetValue', 'double', [param('double', 'constant')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function] cls.add_method('GetInteger', 'uint32_t', [param('uint32_t', 'constant')]) ## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3ConstantSpeedPropagationDelayModel_methods(root_module, cls): ## propagation-delay-model.h (module 'propagation'): 
ns3::ConstantSpeedPropagationDelayModel::ConstantSpeedPropagationDelayModel(ns3::ConstantSpeedPropagationDelayModel const & arg0) [constructor] cls.add_constructor([param('ns3::ConstantSpeedPropagationDelayModel const &', 'arg0')]) ## propagation-delay-model.h (module 'propagation'): ns3::ConstantSpeedPropagationDelayModel::ConstantSpeedPropagationDelayModel() [constructor] cls.add_constructor([]) ## propagation-delay-model.h (module 'propagation'): ns3::Time ns3::ConstantSpeedPropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetDelay', 'ns3::Time', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True) ## propagation-delay-model.h (module 'propagation'): double ns3::ConstantSpeedPropagationDelayModel::GetSpeed() const [member function] cls.add_method('GetSpeed', 'double', [], is_const=True) ## propagation-delay-model.h (module 'propagation'): static ns3::TypeId ns3::ConstantSpeedPropagationDelayModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## propagation-delay-model.h (module 'propagation'): void ns3::ConstantSpeedPropagationDelayModel::SetSpeed(double speed) [member function] cls.add_method('SetSpeed', 'void', [param('double', 'speed')]) ## propagation-delay-model.h (module 'propagation'): int64_t ns3::ConstantSpeedPropagationDelayModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True, visibility='private') return def register_Ns3Cost231PropagationLossModel_methods(root_module, cls): ## cost231-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::Cost231PropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## cost231-propagation-loss-model.h (module 'propagation'): 
ns3::Cost231PropagationLossModel::Cost231PropagationLossModel() [constructor] cls.add_constructor([]) ## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('GetLoss', 'double', [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True) ## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetBSAntennaHeight(double height) [member function] cls.add_method('SetBSAntennaHeight', 'void', [param('double', 'height')]) ## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetSSAntennaHeight(double height) [member function] cls.add_method('SetSSAntennaHeight', 'void', [param('double', 'height')]) ## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetLambda(double lambda) [member function] cls.add_method('SetLambda', 'void', [param('double', 'lambda')]) ## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetLambda(double frequency, double speed) [member function] cls.add_method('SetLambda', 'void', [param('double', 'frequency'), param('double', 'speed')]) ## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetMinDistance(double minDistance) [member function] cls.add_method('SetMinDistance', 'void', [param('double', 'minDistance')]) ## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetBSAntennaHeight() const [member function] cls.add_method('GetBSAntennaHeight', 'double', [], is_const=True) ## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetSSAntennaHeight() const [member function] cls.add_method('GetSSAntennaHeight', 'double', [], 
is_const=True) ## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetMinDistance() const [member function] cls.add_method('GetMinDistance', 'double', [], is_const=True) ## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetLambda() const [member function] cls.add_method('GetLambda', 'double', [], is_const=True) ## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetShadowing() [member function] cls.add_method('GetShadowing', 'double', []) ## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetShadowing(double shadowing) [member function] cls.add_method('SetShadowing', 'void', [param('double', 'shadowing')]) ## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function] cls.add_method('DoCalcRxPower', 'double', [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')], is_const=True, is_virtual=True, visibility='private') ## cost231-propagation-loss-model.h (module 'propagation'): int64_t ns3::Cost231PropagationLossModel::DoAssignStreams(int64_t stream) [member function] cls.add_method('DoAssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True, visibility='private') return def register_Ns3DeterministicRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void 
ns3::DeterministicRandomVariable::SetValueArray(double * values, std::size_t length) [member function] cls.add_method('SetValueArray', 'void', [param('double *', 'values'), param('std::size_t', 'length')]) ## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) return def register_Ns3EmpiricalRandomVariable_methods(root_module, cls): ## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor] cls.add_constructor([]) ## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function] cls.add_method('CDF', 'void', [param('double', 'v'), param('double', 'c')]) ## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function] cls.add_method('GetInteger', 'uint32_t', [], is_virtual=True) ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function] cls.add_method('GetValue', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate() [member function] cls.add_method('Interpolate', 'double', [], is_virtual=True) ## random-variable-stream.h (module 'core'): bool ns3::EmpiricalRandomVariable::SetInterpolate(bool interpolate) [member function] cls.add_method('SetInterpolate', 'bool', [param('bool', 'interpolate')]) return def register_Ns3EmptyAttributeAccessor_methods(root_module, cls): ## attribute.h (module 'core'): 
ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [constructor] cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function] cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function] cls.add_method('HasGetter', 'bool', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function] cls.add_method('HasSetter', 'bool', [], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function] cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True) return def register_Ns3EmptyAttributeChecker_methods(root_module, cls): ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [constructor] cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')]) ## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor] cls.add_constructor([]) ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function] cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_const=True, is_virtual=True) ## attribute.h (module 'core'): bool 
ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    return

def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    # Auto-generated PyBindGen registrations: binds constructors and the
    # (private) serialization hooks of ns3::EmptyAttributeValue onto `cls`.
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True, visibility='private')
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True, visibility='private')
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True, visibility='private')
    return

def register_Ns3ErlangRandomVariable_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::ErlangRandomVariable
    # (random-variable-stream.h): accessors plus GetValue/GetInteger overloads.
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function]
    cls.add_method('GetK',
                   'uint32_t',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function]
    cls.add_method('GetLambda',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('uint32_t', 'k'), param('double', 'lambda')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'k'), param('uint32_t', 'lambda')])
    ## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return

def register_Ns3EventImpl_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::EventImpl (event-impl.h):
    # constructors, Cancel/Invoke/IsCancelled, and the protected Notify hook.
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [constructor]
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
    cls.add_constructor([])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
    cls.add_method('Cancel',
                   'void',
                   [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
    cls.add_method('Invoke',
                   'void',
                   [])
    ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
    cls.add_method('IsCancelled',
                   'bool',
                   [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
    cls.add_method('Notify',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True, visibility='protected')
    return

def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::ExponentialRandomVariable
    # (random-variable-stream.h): accessors plus GetValue/GetInteger overloads.
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mean'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return

def register_Ns3FixedRssLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::FixedRssLossModel
    # (propagation-loss-model.h): SetRss plus the private DoCalcRxPower /
    # DoAssignStreams virtual overrides.
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FixedRssLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel::FixedRssLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FixedRssLossModel::SetRss(double rss) [member function]
    cls.add_method('SetRss',
                   'void',
                   [param('double', 'rss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::FixedRssLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::FixedRssLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True, visibility='private')
    return

def register_Ns3FriisPropagationLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::FriisPropagationLossModel
    # (propagation-loss-model.h): frequency/system-loss/min-loss accessors plus
    # the private DoCalcRxPower / DoAssignStreams virtual overrides.
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FriisPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel::FriisPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetFrequency(double frequency) [member function]
    cls.add_method('SetFrequency',
                   'void',
                   [param('double', 'frequency')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
    cls.add_method('SetSystemLoss',
                   'void',
                   [param('double', 'systemLoss')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetMinLoss(double minLoss) [member function]
    cls.add_method('SetMinLoss',
                   'void',
                   [param('double', 'minLoss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetMinLoss() const [member function]
    cls.add_method('GetMinLoss',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetFrequency() const [member function]
    cls.add_method('GetFrequency',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetSystemLoss() const [member function]
    cls.add_method('GetSystemLoss',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::FriisPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True, visibility='private')
    return

def register_Ns3GammaRandomVariable_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::GammaRandomVariable
    # (random-variable-stream.h): alpha/beta accessors plus GetValue/GetInteger.
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function]
    cls.add_method('GetAlpha',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function]
    cls.add_method('GetBeta',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'alpha'), param('double', 'beta')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
    ## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return

def register_Ns3ItuR1411LosPropagationLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::ItuR1411LosPropagationLossModel (itu-r-1411-los-propagation-loss-model.h).
    ## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ItuR1411LosPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411LosPropagationLossModel::ItuR1411LosPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): void ns3::ItuR1411LosPropagationLossModel::SetFrequency(double freq) [member function]
    cls.add_method('SetFrequency',
                   'void',
                   [param('double', 'freq')])
    ## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411LosPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('GetLoss',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True)
    ## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411LosPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    ## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): int64_t ns3::ItuR1411LosPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True, visibility='private')
    return

def register_Ns3ItuR1411NlosOverRooftopPropagationLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::ItuR1411NlosOverRooftopPropagationLossModel
    # (itu-r-1411-nlos-over-rooftop-propagation-loss-model.h).
    ## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ItuR1411NlosOverRooftopPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411NlosOverRooftopPropagationLossModel::ItuR1411NlosOverRooftopPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): void ns3::ItuR1411NlosOverRooftopPropagationLossModel::SetFrequency(double freq) [member function]
    cls.add_method('SetFrequency',
                   'void',
                   [param('double', 'freq')])
    ## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411NlosOverRooftopPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('GetLoss',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True)
    ## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411NlosOverRooftopPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    ## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): int64_t ns3::ItuR1411NlosOverRooftopPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True, visibility='private')
    return

def register_Ns3JakesProcess_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::JakesProcess
    # (jakes-process.h): gain accessors and SetPropagationLossModel.
    ## jakes-process.h (module 'propagation'): ns3::JakesProcess::JakesProcess(ns3::JakesProcess const & arg0) [constructor]
    cls.add_constructor([param('ns3::JakesProcess const &', 'arg0')])
    ## jakes-process.h (module 'propagation'): ns3::JakesProcess::JakesProcess() [constructor]
    cls.add_constructor([])
    ## jakes-process.h (module 'propagation'): void ns3::JakesProcess::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True)
    ## jakes-process.h (module 'propagation'): double ns3::JakesProcess::GetChannelGainDb() const [member function]
    cls.add_method('GetChannelGainDb',
                   'double',
                   [],
                   is_const=True)
    ## jakes-process.h (module 'propagation'): std::complex<double> ns3::JakesProcess::GetComplexGain() const [member function]
    cls.add_method('GetComplexGain',
                   'std::complex< double >',
                   [],
                   is_const=True)
    ## jakes-process.h (module 'propagation'): static ns3::TypeId ns3::JakesProcess::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## jakes-process.h (module 'propagation'): void ns3::JakesProcess::SetPropagationLossModel(ns3::Ptr<const ns3::PropagationLossModel> model) [member function]
    cls.add_method('SetPropagationLossModel',
                   'void',
                   [param('ns3::Ptr< ns3::PropagationLossModel const >', 'model')])
    return

def register_Ns3JakesPropagationLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::JakesPropagationLossModel
    # (jakes-propagation-loss-model.h).
    ## jakes-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::JakesPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## jakes-propagation-loss-model.h (module 'propagation'): ns3::JakesPropagationLossModel::JakesPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## jakes-propagation-loss-model.h (module 'propagation'): double ns3::JakesPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    ## jakes-propagation-loss-model.h (module 'propagation'): int64_t ns3::JakesPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True, visibility='private')
    return

def register_Ns3Kun2600MhzPropagationLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::Kun2600MhzPropagationLossModel (kun-2600-mhz-propagation-loss-model.h).
    ## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::Kun2600MhzPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): ns3::Kun2600MhzPropagationLossModel::Kun2600MhzPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): double ns3::Kun2600MhzPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('GetLoss',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True)
    ## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): double ns3::Kun2600MhzPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    ## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): int64_t ns3::Kun2600MhzPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True, visibility='private')
    return

def register_Ns3LogDistancePropagationLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::LogDistancePropagationLossModel (propagation-loss-model.h):
    # path-loss-exponent accessors, SetReference, and private overrides.
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::LogDistancePropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel::LogDistancePropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetPathLossExponent(double n) [member function]
    cls.add_method('SetPathLossExponent',
                   'void',
                   [param('double', 'n')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::GetPathLossExponent() const [member function]
    cls.add_method('GetPathLossExponent',
                   'double',
                   [],
                   is_const=True)
    ## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetReference(double referenceDistance, double referenceLoss) [member function]
    cls.add_method('SetReference',
                   'void',
                   [param('double', 'referenceDistance'), param('double', 'referenceLoss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::LogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True, visibility='private')
    return

def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::LogNormalRandomVariable
    # (random-variable-stream.h): mu/sigma accessors plus GetValue/GetInteger.
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
    cls.add_method('GetMu',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
    cls.add_method('GetSigma',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mu'), param('double', 'sigma')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
    ## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return

def register_Ns3MatrixPropagationLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::MatrixPropagationLossModel
    # (propagation-loss-model.h): per-pair SetLoss (optionally symmetric),
    # SetDefaultLoss, and the private overrides.
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::MatrixPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel::MatrixPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, double loss, bool symmetric=true) [member function]
    cls.add_method('SetLoss',
                   'void',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('double', 'loss'), param('bool', 'symmetric', default_value='true')])
    ## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetDefaultLoss(double defaultLoss) [member function]
    cls.add_method('SetDefaultLoss',
                   'void',
                   [param('double', 'defaultLoss')])
    ## propagation-loss-model.h (module 'propagation'): double ns3::MatrixPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::MatrixPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True, visibility='private')
    return

def register_Ns3MobilityModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::MobilityModel
    # (mobility-model.h): position/velocity API, protected NotifyCourseChange,
    # and the private Do* hooks (several pure virtual).
    ## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel(ns3::MobilityModel const & arg0) [constructor]
    cls.add_constructor([param('ns3::MobilityModel const &', 'arg0')])
    ## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel() [constructor]
    cls.add_constructor([])
    ## mobility-model.h (module 'mobility'): int64_t ns3::MobilityModel::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')])
    ## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetDistanceFrom(ns3::Ptr<const ns3::MobilityModel> position) const [member function]
    cls.add_method('GetDistanceFrom',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel const >', 'position')],
                   is_const=True)
    ## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetPosition() const [member function]
    cls.add_method('GetPosition',
                   'ns3::Vector',
                   [],
                   is_const=True)
    ## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetRelativeSpeed(ns3::Ptr<const ns3::MobilityModel> other) const [member function]
    cls.add_method('GetRelativeSpeed',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel const >', 'other')],
                   is_const=True)
    ## mobility-model.h (module 'mobility'): static ns3::TypeId ns3::MobilityModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetVelocity() const [member function]
    cls.add_method('GetVelocity',
                   'ns3::Vector',
                   [],
                   is_const=True)
    ## mobility-model.h (module 'mobility'): void ns3::MobilityModel::SetPosition(ns3::Vector const & position) [member function]
    cls.add_method('SetPosition',
                   'void',
                   [param('ns3::Vector const &', 'position')])
    ## mobility-model.h (module 'mobility'): void ns3::MobilityModel::NotifyCourseChange() const [member function]
    cls.add_method('NotifyCourseChange',
                   'void',
                   [],
                   is_const=True, visibility='protected')
    ## mobility-model.h (module 'mobility'): int64_t ns3::MobilityModel::DoAssignStreams(int64_t start) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'start')],
                   is_virtual=True, visibility='private')
    ## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetPosition() const [member function]
    cls.add_method('DoGetPosition',
                   'ns3::Vector',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True, visibility='private')
    ## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetVelocity() const [member function]
    cls.add_method('DoGetVelocity',
                   'ns3::Vector',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True, visibility='private')
    ## mobility-model.h (module 'mobility'): void ns3::MobilityModel::DoSetPosition(ns3::Vector const & position) [member function]
    cls.add_method('DoSetPosition',
                   'void',
                   [param('ns3::Vector const &', 'position')],
                   is_pure_virtual=True, is_virtual=True, visibility='private')
    return

def register_Ns3NakagamiPropagationLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::NakagamiPropagationLossModel (propagation-loss-model.h).
    ## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::NakagamiPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel::NakagamiPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## propagation-loss-model.h (module 'propagation'): double ns3::NakagamiPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    ## propagation-loss-model.h (module 'propagation'): int64_t ns3::NakagamiPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True, visibility='private')
    return

def register_Ns3NeverLosChannelConditionModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::NeverLosChannelConditionModel (channel-condition-model.h).
    ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::NeverLosChannelConditionModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## channel-condition-model.h (module 'propagation'): ns3::NeverLosChannelConditionModel::NeverLosChannelConditionModel() [constructor]
    cls.add_constructor([])
    ## channel-condition-model.h (module 'propagation'): ns3::Ptr<ns3::ChannelCondition> ns3::NeverLosChannelConditionModel::GetChannelCondition(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function]
    cls.add_method('GetChannelCondition',
                   'ns3::Ptr< ns3::ChannelCondition >',
                   [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')],
                   is_const=True, is_virtual=True)
    ## channel-condition-model.h (module 'propagation'): int64_t ns3::NeverLosChannelConditionModel::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True)
    return

def register_Ns3NeverLosVehicleChannelConditionModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::NeverLosVehicleChannelConditionModel (channel-condition-model.h).
    ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::NeverLosVehicleChannelConditionModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## channel-condition-model.h (module 'propagation'): ns3::NeverLosVehicleChannelConditionModel::NeverLosVehicleChannelConditionModel() [constructor]
    cls.add_constructor([])
    ## channel-condition-model.h (module 'propagation'): ns3::Ptr<ns3::ChannelCondition> ns3::NeverLosVehicleChannelConditionModel::GetChannelCondition(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function]
    cls.add_method('GetChannelCondition',
                   'ns3::Ptr< ns3::ChannelCondition >',
                   [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')],
                   is_const=True, is_virtual=True)
    ## channel-condition-model.h (module 'propagation'): int64_t ns3::NeverLosVehicleChannelConditionModel::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True)
    return

def register_Ns3NormalRandomVariable_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::NormalRandomVariable
    # (random-variable-stream.h), including the INFINITE_VALUE static attribute
    # used as the default `bound` for GetValue.
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
    cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
    cls.add_method('GetMean',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
    cls.add_method('GetVariance',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return

def register_Ns3OkumuraHataPropagationLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::OkumuraHataPropagationLossModel (okumura-hata-propagation-loss-model.h).
    ## okumura-hata-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::OkumuraHataPropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## okumura-hata-propagation-loss-model.h (module 'propagation'): ns3::OkumuraHataPropagationLossModel::OkumuraHataPropagationLossModel() [constructor]
    cls.add_constructor([])
    ## okumura-hata-propagation-loss-model.h (module 'propagation'): double ns3::OkumuraHataPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('GetLoss',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True)
    ## okumura-hata-propagation-loss-model.h (module 'propagation'): double ns3::OkumuraHataPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
    cls.add_method('DoCalcRxPower',
                   'double',
                   [param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    ## okumura-hata-propagation-loss-model.h (module 'propagation'): int64_t ns3::OkumuraHataPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
    cls.add_method('DoAssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True, visibility='private')
    return

def register_Ns3ParetoRandomVariable_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for ns3::ParetoRandomVariable
    # (random-variable-stream.h): scale/shape/bound accessors plus
    # GetValue/GetInteger overloads.
    ## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
    cls.add_constructor([])
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetScale() const [member function]
    cls.add_method('GetScale',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
    cls.add_method('GetShape',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
    cls.add_method('GetBound',
                   'double',
                   [],
                   is_const=True)
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double scale, double shape, double bound) [member function]
    cls.add_method('GetValue',
                   'double',
                   [param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
    ## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_virtual=True)
    ## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
    cls.add_method('GetInteger',
                   'uint32_t',
                   [],
                   is_virtual=True)
    return

def register_Ns3ThreeGppChannelConditionModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::ThreeGppChannelConditionModel (channel-condition-model.h): public
    # condition API, protected helpers, and private ComputePlos/ComputePnlos.
    ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppChannelConditionModel::ThreeGppChannelConditionModel() [constructor]
    cls.add_constructor([])
    ## channel-condition-model.h (module 'propagation'): int64_t ns3::ThreeGppChannelConditionModel::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')],
                   is_virtual=True)
    ## channel-condition-model.h (module 'propagation'): ns3::Ptr<ns3::ChannelCondition> ns3::ThreeGppChannelConditionModel::GetChannelCondition(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function]
    cls.add_method('GetChannelCondition',
                   'ns3::Ptr< ns3::ChannelCondition >',
                   [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')],
                   is_const=True, is_virtual=True)
    ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppChannelConditionModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## channel-condition-model.h (module 'propagation'): static double ns3::ThreeGppChannelConditionModel::Calculate2dDistance(ns3::Vector const & a, ns3::Vector const & b) [member function]
    cls.add_method('Calculate2dDistance',
                   'double',
                   [param('ns3::Vector const &', 'a'), param('ns3::Vector const &', 'b')],
                   is_static=True, visibility='protected')
    ## channel-condition-model.h (module 'propagation'): void ns3::ThreeGppChannelConditionModel::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True, visibility='protected')
    ## channel-condition-model.h (module 'propagation'): double ns3::ThreeGppChannelConditionModel::ComputePlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function]
    cls.add_method('ComputePlos',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')],
                   is_const=True, is_pure_virtual=True, is_virtual=True, visibility='private')
    ## channel-condition-model.h (module 'propagation'): double ns3::ThreeGppChannelConditionModel::ComputePnlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function]
    cls.add_method('ComputePnlos',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    return

def register_Ns3ThreeGppIndoorMixedOfficeChannelConditionModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::ThreeGppIndoorMixedOfficeChannelConditionModel (channel-condition-model.h).
    ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppIndoorMixedOfficeChannelConditionModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppIndoorMixedOfficeChannelConditionModel::ThreeGppIndoorMixedOfficeChannelConditionModel() [constructor]
    cls.add_constructor([])
    ## channel-condition-model.h (module 'propagation'): double ns3::ThreeGppIndoorMixedOfficeChannelConditionModel::ComputePlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function]
    cls.add_method('ComputePlos',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')],
                   is_const=True, is_virtual=True, visibility='private')
    return

def register_Ns3ThreeGppIndoorOfficePropagationLossModel_methods(root_module, cls):
    # Auto-generated PyBindGen registrations for
    # ns3::ThreeGppIndoorOfficePropagationLossModel
    # (three-gpp-propagation-loss-model.h); continues past this chunk.
    ## three-gpp-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppIndoorOfficePropagationLossModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## three-gpp-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppIndoorOfficePropagationLossModel::ThreeGppIndoorOfficePropagationLossModel() [constructor]
    cls.add_constructor([])
    ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppIndoorOfficePropagationLossModel::GetLossLos(double distance2D, double distance3D, double hUt, double hBs) const [member function]
    cls.add_method('GetLossLos',
                   'double',
                   [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')],
                   is_const=True, is_virtual=True, visibility='private')
    ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppIndoorOfficePropagationLossModel::GetLossNlos(double distance2D, double distance3D, double hUt, double hBs) const [member function]
    cls.add_method('GetLossNlos',
                   'double',
                   [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')],
                   is_const=True, is_virtual=True, visibility='private')
    ## three-gpp-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppIndoorOfficePropagationLossModel::GetShadowingStd(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, ns3::ChannelCondition::LosConditionValue cond) const [member function]
    cls.add_method('GetShadowingStd',
                   'double',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('ns3::ChannelCondition::LosConditionValue', 'cond')],
                   is_const=True, is_virtual=True, visibility='private')
    ## three-gpp-propagation-loss-model.h (module 'propagation'): double 
ns3::ThreeGppIndoorOfficePropagationLossModel::GetShadowingCorrelationDistance(ns3::ChannelCondition::LosConditionValue cond) const [member function] cls.add_method('GetShadowingCorrelationDistance', 'double', [param('ns3::ChannelCondition::LosConditionValue', 'cond')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3ThreeGppIndoorOpenOfficeChannelConditionModel_methods(root_module, cls): ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppIndoorOpenOfficeChannelConditionModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppIndoorOpenOfficeChannelConditionModel::ThreeGppIndoorOpenOfficeChannelConditionModel() [constructor] cls.add_constructor([]) ## channel-condition-model.h (module 'propagation'): double ns3::ThreeGppIndoorOpenOfficeChannelConditionModel::ComputePlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function] cls.add_method('ComputePlos', 'double', [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3ThreeGppRmaChannelConditionModel_methods(root_module, cls): ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppRmaChannelConditionModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppRmaChannelConditionModel::ThreeGppRmaChannelConditionModel() [constructor] cls.add_constructor([]) ## channel-condition-model.h (module 'propagation'): double ns3::ThreeGppRmaChannelConditionModel::ComputePlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function] cls.add_method('ComputePlos', 'double', [param('ns3::Ptr< ns3::MobilityModel const 
>', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3ThreeGppUmaChannelConditionModel_methods(root_module, cls): ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppUmaChannelConditionModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppUmaChannelConditionModel::ThreeGppUmaChannelConditionModel() [constructor] cls.add_constructor([]) ## channel-condition-model.h (module 'propagation'): double ns3::ThreeGppUmaChannelConditionModel::ComputePlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function] cls.add_method('ComputePlos', 'double', [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3ThreeGppUmiStreetCanyonChannelConditionModel_methods(root_module, cls): ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppUmiStreetCanyonChannelConditionModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## channel-condition-model.h (module 'propagation'): ns3::ThreeGppUmiStreetCanyonChannelConditionModel::ThreeGppUmiStreetCanyonChannelConditionModel() [constructor] cls.add_constructor([]) ## channel-condition-model.h (module 'propagation'): double ns3::ThreeGppUmiStreetCanyonChannelConditionModel::ComputePlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function] cls.add_method('ComputePlos', 'double', [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3ThreeGppV2vHighwayPropagationLossModel_methods(root_module, cls): ## 
three-gpp-v2v-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeGppV2vHighwayPropagationLossModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): ns3::ThreeGppV2vHighwayPropagationLossModel::ThreeGppV2vHighwayPropagationLossModel() [constructor] cls.add_constructor([]) ## three-gpp-v2v-propagation-loss-model.h (module 'propagation'): double ns3::ThreeGppV2vHighwayPropagationLossModel::GetLossLos(double distance2D, double distance3D, double hUt, double hBs) const [member function] cls.add_method('GetLossLos', 'double', [param('double', 'distance2D'), param('double', 'distance3D'), param('double', 'hUt'), param('double', 'hBs')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3TimeValue_methods(root_module, cls): ## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor] cls.add_constructor([]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor] cls.add_constructor([param('ns3::Time const &', 'value')]) ## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [constructor] cls.add_constructor([param('ns3::TimeValue const &', 'arg0')]) ## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function] cls.add_method('Get', 'ns3::Time', [], is_const=True) ## nstime.h (module 'core'): std::string 
ns3::TimeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')]) return def register_Ns3TypeIdChecker_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [constructor] cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')]) return def register_Ns3TypeIdValue_methods(root_module, cls): ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor] cls.add_constructor([]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor] cls.add_constructor([param('ns3::TypeId const &', 'value')]) ## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [constructor] cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')]) ## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function] cls.add_method('Get', 'ns3::TypeId', [], is_const=True) ## type-id.h (module 'core'): std::string 
ns3::TypeIdValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function] cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')]) return def register_Ns3Vector2DChecker_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [constructor] cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')]) return def register_Ns3Vector2DValue_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor] cls.add_constructor([param('ns3::Vector2D const &', 'value')]) ## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [constructor] cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')]) ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function] cls.add_method('Get', 'ns3::Vector2D', [], is_const=True) ## vector.h (module 'core'): 
std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Vector2D const &', 'value')]) return def register_Ns3Vector3DChecker_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [constructor] cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')]) return def register_Ns3Vector3DValue_methods(root_module, cls): ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor] cls.add_constructor([]) ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor] cls.add_constructor([param('ns3::Vector3D const &', 'value')]) ## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [constructor] cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')]) ## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function] cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True) ## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function] cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True) ## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function] cls.add_method('Get', 'ns3::Vector3D', [], is_const=True) ## 
vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function] cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True) ## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function] cls.add_method('Set', 'void', [param('ns3::Vector3D const &', 'value')]) return def register_Ns3AlwaysLosChannelConditionModel_methods(root_module, cls): ## channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::AlwaysLosChannelConditionModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## channel-condition-model.h (module 'propagation'): ns3::AlwaysLosChannelConditionModel::AlwaysLosChannelConditionModel() [constructor] cls.add_constructor([]) ## channel-condition-model.h (module 'propagation'): ns3::Ptr<ns3::ChannelCondition> ns3::AlwaysLosChannelConditionModel::GetChannelCondition(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function] cls.add_method('GetChannelCondition', 'ns3::Ptr< ns3::ChannelCondition >', [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, is_virtual=True) ## channel-condition-model.h (module 'propagation'): int64_t ns3::AlwaysLosChannelConditionModel::AssignStreams(int64_t stream) [member function] cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')], is_virtual=True) return def register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor] 
cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor] cls.add_constructor([param('ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')]) ## callback.h (module 'core'): static std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function] cls.add_method('DoGetTypeid', 'std::string', [], is_static=True) ## callback.h (module 'core'): std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function] cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): ns3::ObjectBase * ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()() [member operator] cls.add_method('operator()', 'ns3::ObjectBase *', [], custom_name='__call__', is_pure_virtual=True, is_virtual=True) return def register_Ns3CallbackImpl__Void_Ns3Ptr__lt__const_ns3MobilityModel__gt___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls): ## callback.h (module 'core'): ns3::CallbackImpl<void, ns3::Ptr<const ns3::MobilityModel>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor] cls.add_constructor([]) ## callback.h (module 'core'): ns3::CallbackImpl<void, 
ns3::Ptr<const ns3::MobilityModel>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, ns3::Ptr<const ns3::MobilityModel>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor] cls.add_constructor([param('ns3::CallbackImpl< void, ns3::Ptr< ns3::MobilityModel const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')]) ## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::MobilityModel>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function] cls.add_method('DoGetTypeid', 'std::string', [], is_static=True) ## callback.h (module 'core'): std::string ns3::CallbackImpl<void, ns3::Ptr<const ns3::MobilityModel>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function] cls.add_method('GetTypeid', 'std::string', [], is_const=True, is_virtual=True) ## callback.h (module 'core'): void ns3::CallbackImpl<void, ns3::Ptr<const ns3::MobilityModel>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(ns3::Ptr<const ns3::MobilityModel> arg0) [member operator] cls.add_method('operator()', 'void', [param('ns3::Ptr< ns3::MobilityModel const >', 'arg0')], custom_name='__call__', is_pure_virtual=True, is_virtual=True) return def register_Ns3ProbabilisticV2vHighwayChannelConditionModel_methods(root_module, cls): ## probabilistic-v2v-channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::ProbabilisticV2vHighwayChannelConditionModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## probabilistic-v2v-channel-condition-model.h (module 'propagation'): 
ns3::ProbabilisticV2vHighwayChannelConditionModel::ProbabilisticV2vHighwayChannelConditionModel() [constructor] cls.add_constructor([]) ## probabilistic-v2v-channel-condition-model.h (module 'propagation'): double ns3::ProbabilisticV2vHighwayChannelConditionModel::ComputePlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function] cls.add_method('ComputePlos', 'double', [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, is_virtual=True, visibility='private') ## probabilistic-v2v-channel-condition-model.h (module 'propagation'): double ns3::ProbabilisticV2vHighwayChannelConditionModel::ComputePnlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function] cls.add_method('ComputePnlos', 'double', [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3ProbabilisticV2vUrbanChannelConditionModel_methods(root_module, cls): ## probabilistic-v2v-channel-condition-model.h (module 'propagation'): static ns3::TypeId ns3::ProbabilisticV2vUrbanChannelConditionModel::GetTypeId() [member function] cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True) ## probabilistic-v2v-channel-condition-model.h (module 'propagation'): ns3::ProbabilisticV2vUrbanChannelConditionModel::ProbabilisticV2vUrbanChannelConditionModel() [constructor] cls.add_constructor([]) ## probabilistic-v2v-channel-condition-model.h (module 'propagation'): double ns3::ProbabilisticV2vUrbanChannelConditionModel::ComputePlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function] cls.add_method('ComputePlos', 'double', [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, is_virtual=True, visibility='private') ## 
probabilistic-v2v-channel-condition-model.h (module 'propagation'): double ns3::ProbabilisticV2vUrbanChannelConditionModel::ComputePnlos(ns3::Ptr<const ns3::MobilityModel> a, ns3::Ptr<const ns3::MobilityModel> b) const [member function] cls.add_method('ComputePnlos', 'double', [param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b')], is_const=True, is_virtual=True, visibility='private') return def register_Ns3HashImplementation_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [constructor] cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor] cls.add_constructor([]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, std::size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')], is_pure_virtual=True, is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, std::size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function] cls.add_method('clear', 'void', [], is_pure_virtual=True, is_virtual=True) return def register_Ns3HashFunctionFnv1a_methods(root_module, cls): ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [constructor] cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')]) ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor] cls.add_constructor([]) ## hash-fnv.h (module 'core'): uint32_t 
ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('size_t const', 'size')], is_virtual=True) ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash32_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [constructor] cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')]) ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, std::size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionHash64_methods(root_module, cls): ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [constructor] cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')]) ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor] cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')]) ## hash-function.h (module 
'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, std::size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, std::size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')], is_virtual=True) ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_Ns3HashFunctionMurmur3_methods(root_module, cls): ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [constructor] cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')]) ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor] cls.add_constructor([]) ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, std::size_t const size) [member function] cls.add_method('GetHash32', 'uint32_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, std::size_t const size) [member function] cls.add_method('GetHash64', 'uint64_t', [param('char const *', 'buffer'), param('std::size_t const', 'size')], is_virtual=True) ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function] cls.add_method('clear', 'void', [], is_virtual=True) return def register_functions(root_module): module = root_module register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module) register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module) 
register_functions_ns3_TracedValueCallback(module.add_cpp_namespace('TracedValueCallback'), root_module) return def register_functions_ns3_FatalImpl(module, root_module): return def register_functions_ns3_Hash(module, root_module): register_functions_ns3_Hash_Function(module.add_cpp_namespace('Function'), root_module) return def register_functions_ns3_Hash_Function(module, root_module): return def register_functions_ns3_TracedValueCallback(module, root_module): return def main(): out = FileCodeSink(sys.stdout) root_module = module_init() register_types(root_module) register_methods(root_module) register_functions(root_module) root_module.generate(out) if __name__ == '__main__': main()