| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
ChenJunor/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/PublicKey/__init__.py | 114 | 1842 | # -*- coding: utf-8 -*-
#
# SelfTest/PublicKey/__init__.py: Self-test for public key crypto
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for public-key crypto"""
__revision__ = "$Id$"
import os
def get_tests(config={}):
tests = []
from Crypto.SelfTest.PublicKey import test_DSA; tests += test_DSA.get_tests(config=config)
from Crypto.SelfTest.PublicKey import test_RSA; tests += test_RSA.get_tests(config=config)
from Crypto.SelfTest.PublicKey import test_importKey; tests += test_importKey.get_tests(config=config)
from Crypto.SelfTest.PublicKey import test_ElGamal; tests += test_ElGamal.get_tests(config=config)
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
Dhivyap/ansible | lib/ansible/modules/cloud/google/gcp_pubsub_topic.py | 3 | 12632 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_pubsub_topic
description:
- A named resource to which messages are sent by publishers.
short_description: Creates a GCP Topic
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
name:
description:
- Name of the topic.
required: true
type: str
kms_key_name:
description:
- The resource name of the Cloud KMS CryptoKey to be used to protect access to
messages published on this topic. Your project's PubSub service account (`service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com`)
must have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature.
- The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*` .
required: false
type: str
version_added: '2.9'
labels:
description:
- A set of key/value label pairs to assign to this Topic.
required: false
type: dict
version_added: '2.8'
message_storage_policy:
description:
- Policy constraining the set of Google Cloud Platform regions where messages
published to the topic may be stored. If not present, then no constraints are
in effect.
required: false
type: dict
version_added: '2.9'
suboptions:
allowed_persistence_regions:
description:
- A list of IDs of GCP regions where messages that are published to the topic
may be persisted in storage. Messages published by publishers running in
non-allowed GCP regions (or running outside of GCP altogether) will be routed
for storage in one of the allowed regions. An empty list means that no regions
are allowed, and is not a valid configuration.
required: true
type: list
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics)'
- 'Managing Topics: U(https://cloud.google.com/pubsub/docs/admin#managing_topics)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a topic
gcp_pubsub_topic:
name: test-topic1
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
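# A hypothetical second task (sketch only: the region, label values, and
# topic name below are assumptions, not taken from this module's docs),
# showing the labels and message_storage_policy options documented above:
#
# - name: create a topic with labels and a storage policy
#   gcp_pubsub_topic:
#     name: test-topic2
#     labels:
#       env: test
#     message_storage_policy:
#       allowed_persistence_regions:
#       - europe-west3
#     project: test_project
#     auth_kind: serviceaccount
#     service_account_file: "/tmp/auth.pem"
#     state: present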
RETURN = '''
name:
description:
- Name of the topic.
returned: success
type: str
kmsKeyName:
description:
- The resource name of the Cloud KMS CryptoKey to be used to protect access to messages
published on this topic. Your project's PubSub service account (`service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com`)
must have `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature.
- The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*` .
returned: success
type: str
labels:
description:
- A set of key/value label pairs to assign to this Topic.
returned: success
type: dict
messageStoragePolicy:
description:
- Policy constraining the set of Google Cloud Platform regions where messages published
to the topic may be stored. If not present, then no constraints are in effect.
returned: success
type: complex
contains:
allowedPersistenceRegions:
description:
- A list of IDs of GCP regions where messages that are published to the topic
may be persisted in storage. Messages published by publishers running in non-allowed
GCP regions (or running outside of GCP altogether) will be routed for storage
in one of the allowed regions. An empty list means that no regions are allowed,
and is not a valid configuration.
returned: success
type: list
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
kms_key_name=dict(type='str'),
labels=dict(type='dict'),
message_storage_policy=dict(type='dict', options=dict(allowed_persistence_regions=dict(required=True, type='list', elements='str'))),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/pubsub']
state = module.params['state']
fetch = fetch_resource(module, self_link(module))
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), fetch)
fetch = fetch_resource(module, self_link(module))
changed = True
else:
delete(module, self_link(module))
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, self_link(module))
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link):
auth = GcpSession(module, 'pubsub')
return return_if_object(module, auth.put(link, resource_to_request(module)))
def update(module, link, fetch):
auth = GcpSession(module, 'pubsub')
params = {'updateMask': updateMask(resource_to_request(module), response_to_hash(module, fetch))}
request = resource_to_request(module)
del request['name']
return return_if_object(module, auth.patch(link, request, params=params))
def updateMask(request, response):
update_mask = []
if request.get('labels') != response.get('labels'):
update_mask.append('labels')
if request.get('messageStoragePolicy') != response.get('messageStoragePolicy'):
update_mask.append('messageStoragePolicy')
return ','.join(update_mask)
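# For example (hypothetical dicts): if the desired request carries
# {'labels': {'env': 'prod'}} while the current response has {'labels': {}},
# updateMask() returns 'labels', so the PATCH issued by update() above only
# touches the field that actually changed.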
def delete(module, link):
auth = GcpSession(module, 'pubsub')
return return_if_object(module, auth.delete(link))
def resource_to_request(module):
request = {
u'name': name_pattern(module.params.get('name'), module),
u'kmsKeyName': module.params.get('kms_key_name'),
u'labels': module.params.get('labels'),
u'messageStoragePolicy': TopicMessagestoragepolicy(module.params.get('message_storage_policy', {}), module).to_request(),
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, allow_not_found=True):
auth = GcpSession(module, 'pubsub')
return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
return "https://pubsub.googleapis.com/v1/projects/{project}/topics/{name}".format(**module.params)
def collection(module):
return "https://pubsub.googleapis.com/v1/projects/{project}/topics".format(**module.params)
def return_if_object(module, response, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'name': name_pattern(module.params.get('name'), module),
u'kmsKeyName': module.params.get('kms_key_name'),
u'labels': response.get(u'labels'),
u'messageStoragePolicy': TopicMessagestoragepolicy(response.get(u'messageStoragePolicy', {}), module).from_response(),
}
def name_pattern(name, module):
if name is None:
return
regex = r"projects/.*/topics/.*"
if not re.match(regex, name):
name = "projects/{project}/topics/{name}".format(**module.params)
return name
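# A quick sketch of name_pattern() (the project id is an assumption): with
# module.params == {'project': 'my-project', 'name': 't1'}, the short name
# 't1' expands to 'projects/my-project/topics/t1', while a name that already
# matches 'projects/.*/topics/.*' is returned unchanged.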
class TopicMessagestoragepolicy(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'allowedPersistenceRegions': self.request.get('allowed_persistence_regions')})
def from_response(self):
return remove_nones_from_dict({u'allowedPersistenceRegions': self.request.get(u'allowedPersistenceRegions')})
if __name__ == '__main__':
main()
| gpl-3.0 |
NvanAdrichem/networkx | networkx/algorithms/components/tests/test_subgraph_copies.py | 52 | 3497 | """ Tests for subgraph attributes
"""
from copy import deepcopy
from nose.tools import assert_equal
import networkx as nx
class TestSubgraphAttributesDicts:
def setUp(self):
self.undirected = [
nx.connected_component_subgraphs,
nx.biconnected_component_subgraphs,
]
self.directed = [
nx.weakly_connected_component_subgraphs,
nx.strongly_connected_component_subgraphs,
nx.attracting_component_subgraphs,
]
self.subgraph_funcs = self.undirected + self.directed
self.D = nx.DiGraph()
self.D.add_edge(1, 2, eattr='red')
self.D.add_edge(2, 1, eattr='red')
self.D.node[1]['nattr'] = 'blue'
self.D.graph['gattr'] = 'green'
self.G = nx.Graph()
self.G.add_edge(1, 2, eattr='red')
self.G.node[1]['nattr'] = 'blue'
self.G.graph['gattr'] = 'green'
def test_subgraphs_default_copy_behavior(self):
# Test the default behavior of subgraph functions
# For the moment (1.10) the default is to copy
for subgraph_func in self.subgraph_funcs:
G = deepcopy(self.G if subgraph_func in self.undirected else self.D)
SG = list(subgraph_func(G))[0]
assert_equal(SG[1][2]['eattr'], 'red')
assert_equal(SG.node[1]['nattr'], 'blue')
assert_equal(SG.graph['gattr'], 'green')
SG[1][2]['eattr'] = 'foo'
assert_equal(G[1][2]['eattr'], 'red')
assert_equal(SG[1][2]['eattr'], 'foo')
SG.node[1]['nattr'] = 'bar'
assert_equal(G.node[1]['nattr'], 'blue')
assert_equal(SG.node[1]['nattr'], 'bar')
SG.graph['gattr'] = 'baz'
assert_equal(G.graph['gattr'], 'green')
assert_equal(SG.graph['gattr'], 'baz')
def test_subgraphs_copy(self):
for subgraph_func in self.subgraph_funcs:
test_graph = self.G if subgraph_func in self.undirected else self.D
G = deepcopy(test_graph)
SG = list(subgraph_func(G, copy=True))[0]
assert_equal(SG[1][2]['eattr'], 'red')
assert_equal(SG.node[1]['nattr'], 'blue')
assert_equal(SG.graph['gattr'], 'green')
SG[1][2]['eattr'] = 'foo'
assert_equal(G[1][2]['eattr'], 'red')
assert_equal(SG[1][2]['eattr'], 'foo')
SG.node[1]['nattr'] = 'bar'
assert_equal(G.node[1]['nattr'], 'blue')
assert_equal(SG.node[1]['nattr'], 'bar')
SG.graph['gattr'] = 'baz'
assert_equal(G.graph['gattr'], 'green')
assert_equal(SG.graph['gattr'], 'baz')
def test_subgraphs_no_copy(self):
for subgraph_func in self.subgraph_funcs:
G = deepcopy(self.G if subgraph_func in self.undirected else self.D)
SG = list(subgraph_func(G, copy=False))[0]
assert_equal(SG[1][2]['eattr'], 'red')
assert_equal(SG.node[1]['nattr'], 'blue')
assert_equal(SG.graph['gattr'], 'green')
SG[1][2]['eattr'] = 'foo'
assert_equal(G[1][2]['eattr'], 'foo')
assert_equal(SG[1][2]['eattr'], 'foo')
SG.node[1]['nattr'] = 'bar'
assert_equal(G.node[1]['nattr'], 'bar')
assert_equal(SG.node[1]['nattr'], 'bar')
SG.graph['gattr'] = 'baz'
assert_equal(G.graph['gattr'], 'baz')
assert_equal(SG.graph['gattr'], 'baz')
| bsd-3-clause |
chronossc/openpyxl | openpyxl/tests/test_dump.py | 2 | 3020 |
# file openpyxl/tests/test_dump.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
# Python stdlib imports
from datetime import time, datetime
# 3rd party imports
from nose.tools import eq_, raises, assert_raises
from openpyxl.workbook import Workbook
from openpyxl.cell import get_column_letter
from openpyxl.reader.excel import load_workbook
from openpyxl.writer.strings import StringTableBuilder
from tempfile import NamedTemporaryFile
import os
import shutil
def test_dump_sheet():
test_file = NamedTemporaryFile(prefix='openpyxl.', suffix='.xlsx', delete=False)
test_file.close()
test_filename = test_file.name
wb = Workbook(optimized_write = True)
ws = wb.create_sheet()
letters = [get_column_letter(x+1) for x in xrange(20)]
expected_rows = []
for row in xrange(20):
expected_rows.append(['%s%d' % (letter, row+1) for letter in letters])
for row in xrange(20):
expected_rows.append([(row+1) for letter in letters])
for row in xrange(10):
expected_rows.append([datetime(2010, ((x % 12)+1), row+1) for x in range(len(letters))])
for row in xrange(20):
expected_rows.append(['=%s%d' % (letter, row+1) for letter in letters])
for row in expected_rows:
ws.append(row)
wb.save(test_filename)
wb2 = load_workbook(test_filename, True)
ws = wb2.worksheets[0]
for ex_row, ws_row in zip(expected_rows[:-20], ws.iter_rows()):
for ex_cell, ws_cell in zip(ex_row, ws_row):
eq_(ex_cell, ws_cell.internal_value)
os.remove(test_filename)
def test_table_builder():
sb = StringTableBuilder()
result = {'a':0, 'b':1, 'c':2, 'd':3}
for letter in sorted(result.keys()):
for x in range(5):
sb.add(letter)
table = dict(sb.get_table())
for key,idx in result.iteritems():
eq_(idx, table[key])
| mit |
HeraclesHX/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each point
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
# fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
# fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
olgabrani/synnefo | snf-cyclades-app/synnefo/logic/management/commands/server-import.py | 9 | 9169 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import make_option
from django.core.management.base import CommandError
from synnefo.management import common
from synnefo.db.models import VirtualMachine, Network, Flavor, VolumeType
from synnefo.logic.utils import id_from_network_name, id_from_instance_name
from synnefo.logic.backend import wait_for_job, connect_to_network
from snf_django.management.commands import SynnefoCommand
from synnefo.logic.rapi import GanetiApiError
from synnefo.logic import servers
from synnefo import quotas
HELP_MSG = """
Import an existing Ganeti instance into Synnefo, with the attributes specified
by the command line options. In order to be imported, the instance will be
turned off, renamed and then turned on again.
Importing an instance will fail if the instance has NICs that are connected to
a network not belonging to Synnefo. You can either manually modify the instance
or use --new-nics option, that will remove all old NICs, and create a new one
connected to a public network of Synnefo.
"""
class Command(SynnefoCommand):
help = "Import an existing Ganeti VM into Synnefo." + HELP_MSG
args = "<ganeti_instance_name>"
output_transaction = True
option_list = SynnefoCommand.option_list + (
make_option(
"--backend-id",
dest="backend_id",
help="Unique identifier of the Ganeti backend that"
" hosts the VM. Use snf-manage backend-list to"
" find out available backends."),
make_option(
"--user",
dest="user_id",
help="Unique identifier of the owner of the server"),
make_option(
"--image",
dest="image_id",
default=None,
help="Unique identifier of the image."
" Use snf-manage image-list to find out"
" available images."),
make_option(
"--flavor",
dest="flavor_id",
help="Unique identifier of the flavor"
" Use snf-manage flavor-list to find out"
" available flavors."),
make_option(
"--new-nics",
dest='new_nics',
default=False,
action="store_true",
help="Remove old NICs of instance, and create"
" a new NIC connected to a public network of"
" Synnefo.")
)
REQUIRED = ("user_id", "backend_id", "image_id", "flavor_id")
def handle(self, *args, **options):
if len(args) < 1:
raise CommandError("Please specify a Ganeti instance")
instance_name = args[0]
try:
id_from_instance_name(instance_name)
except Exception:
pass
else:
raise CommandError("%s is already a synnefo instance" % instance_name)
user_id = options['user_id']
backend_id = options['backend_id']
image_id = options['image_id']
flavor_id = options['flavor_id']
new_public_nic = options['new_nics']
for field in self.REQUIRED:
if not options[field]:
raise CommandError(field + " is mandatory")
import_server(instance_name, backend_id, flavor_id, image_id, user_id,
new_public_nic, self.stderr)
def import_server(instance_name, backend_id, flavor_id, image_id, user_id,
new_public_nic, stream):
flavor = common.get_resource("flavor", flavor_id)
backend = common.get_resource("backend", backend_id)
backend_client = backend.get_client()
try:
instance = backend_client.GetInstance(instance_name)
except GanetiApiError as e:
if e.code == 404:
raise CommandError("Instance %s does not exist in backend %s"
% (instance_name, backend))
else:
raise CommandError("Unexpected error: %s" % e)
if not new_public_nic:
check_instance_nics(instance, stream)
shutdown_instance(instance, backend_client, stream=stream)
# Create the VM in DB
stream.write("Creating VM entry in DB\n")
vm = VirtualMachine.objects.create(name=instance_name,
backend=backend,
userid=user_id,
imageid=image_id,
flavor=flavor)
quotas.issue_and_accept_commission(vm)
if new_public_nic:
remove_instance_nics(instance, backend_client,
stream=stream)
# Rename instance
rename_instance(instance_name, vm.backend_vm_id, backend_client,
stream)
if new_public_nic:
ports = servers.create_instance_ports(user_id)
stream.write("Adding new NICs to server")
[servers.associate_port_with_machine(port, vm)
for port in ports]
[connect_to_network(vm, port) for port in ports]
# Startup instance
startup_instance(vm.backend_vm_id, backend_client, stream=stream)
backend.put_client(backend_client)
return
def flavor_from_instance(instance, flavor, stream):
beparams = instance['beparams']
disk_sizes = instance['disk.sizes']
if len(disk_sizes) != 1:
stream.write("Instance has more than one disk.\n")
disk = disk_sizes[0]
disk_template = instance['disk_template']
cpu = beparams['vcpus']
ram = beparams['memory']
try:
volume_type = VolumeType.objects.get(disk_template=disk_template)
except VolumeType.DoesNotExist:
raise CommandError("Cannot find volume type with '%s' disk template."
% disk_template)
return Flavor.objects.get_or_create(disk=disk,
volume_type=volume_type,
cpu=cpu, ram=ram)
def check_instance_nics(instance, stream):
instance_name = instance['name']
networks = instance['nic.networks.names']
stream.write(str(networks) + "\n")
try:
networks = map(id_from_network_name, networks)
except Network.InvalidBackendIdError:
raise CommandError("Instance %s has NICs that do not belong to a"
" network belonging to synnefo. Either manually"
" modify the instance NICs or specify --new-nics"
" to clear the old NICs and create a new NIC to"
" a public network of synnefo." % instance_name)
def remove_instance_nics(instance, backend_client, stream):
instance_name = instance['name']
ips = instance['nic.ips']
nic_indexes = xrange(0, len(ips))
op = map(lambda x: ('remove', x, {}), nic_indexes)
stream.write("Removing instance nics\n")
op.reverse()
jobid = backend_client.ModifyInstance(instance_name, nics=op)
(status, error) = wait_for_job(backend_client, jobid)
if status != 'success':
raise CommandError("Cannot remove instance NICs: %s" % error)
def add_public_nic(instance_name, nic, backend_client, stream):
stream.write("Adding public NIC %s\n" % nic)
jobid = backend_client.ModifyInstance(instance_name, nics=[('add', nic)])
(status, error) = wait_for_job(backend_client, jobid)
if status != 'success':
raise CommandError("Cannot rename instance: %s" % error)
def shutdown_instance(instance, backend_client, stream):
instance_name = instance['name']
if instance['status'] != 'ADMIN_down':
stream.write("Instance is not down. Shutting down instance...\n")
jobid = backend_client.ShutdownInstance(instance_name)
(status, error) = wait_for_job(backend_client, jobid)
if status != 'success':
raise CommandError("Cannot shutdown instance: %s" % error)
def rename_instance(old_name, new_name, backend_client, stream):
stream.write("Renaming instance to %s\n" % new_name)
jobid = backend_client.RenameInstance(old_name, new_name,
ip_check=False, name_check=False)
(status, error) = wait_for_job(backend_client, jobid)
if status != 'success':
raise CommandError("Cannot rename instance: %s" % error)
def startup_instance(name, backend_client, stream):
stream.write("Starting instance %s\n" % name)
jobid = backend_client.StartupInstance(name)
(status, error) = wait_for_job(backend_client, jobid)
if status != 'success':
raise CommandError("Cannot rename instance: %s" % error)
| gpl-3.0 |
sorenk/ansible | test/units/modules/packaging/os/test_rhn_channel.py | 101 | 5661 | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Pierre-Louis Bonicoli <pierre-louis@libregerbil.fr>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from itertools import product
import json
from ansible.modules.packaging.os import rhn_channel
import pytest
pytestmark = pytest.mark.usefixtures('patch_ansible_module')
@pytest.mark.parametrize('patch_ansible_module', [{}], indirect=['patch_ansible_module'])
def test_without_required_parameters(capfd):
with pytest.raises(SystemExit):
rhn_channel.main()
out, err = capfd.readouterr()
results = json.loads(out)
assert results['failed']
assert 'missing required arguments' in results['msg']
TESTED_MODULE = rhn_channel.__name__
TEST_CASES = [
[
# add channel already added, check that result isn't changed
{
'name': 'rhel-x86_64-server-6',
'sysname': 'server01',
'url': 'https://rhn.redhat.com/rpc/api',
'user': 'user',
'password': 'pass',
},
{
'calls': [
('auth.login', ['X' * 43]),
('system.listUserSystems',
[[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
('channel.software.listSystemChannels',
[[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
('auth.logout', [1]),
],
'changed': False,
'msg': 'Channel rhel-x86_64-server-6 already exists',
}
],
[
# add channel, check that result is changed
{
'name': 'rhel-x86_64-server-6-debuginfo',
'sysname': 'server01',
'url': 'https://rhn.redhat.com/rpc/api',
'user': 'user',
'password': 'pass',
},
{
'calls': [
('auth.login', ['X' * 43]),
('system.listUserSystems',
[[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
('channel.software.listSystemChannels',
[[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
('channel.software.listSystemChannels',
[[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
('system.setChildChannels', [1]),
('auth.logout', [1]),
],
'changed': True,
'msg': 'Channel rhel-x86_64-server-6-debuginfo added',
}
],
[
# remove a nonexistent channel, check that result isn't changed
{
'name': 'rhel-x86_64-server-6-debuginfo',
'state': 'absent',
'sysname': 'server01',
'url': 'https://rhn.redhat.com/rpc/api',
'user': 'user',
'password': 'pass',
},
{
'calls': [
('auth.login', ['X' * 43]),
('system.listUserSystems',
[[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
('channel.software.listSystemChannels',
[[{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}]]),
('auth.logout', [1]),
],
'changed': False,
'msg': 'Not subscribed to channel rhel-x86_64-server-6-debuginfo.',
}
],
[
# remove channel, check that result is changed
{
'name': 'rhel-x86_64-server-6-debuginfo',
'state': 'absent',
'sysname': 'server01',
'url': 'https://rhn.redhat.com/rpc/api',
'user': 'user',
'password': 'pass',
},
{
'calls': [
('auth.login', ['X' * 43]),
('system.listUserSystems',
[[{'last_checkin': '2017-08-06 19:49:52.0', 'id': '0123456789', 'name': 'server01'}]]),
('channel.software.listSystemChannels', [[
{'channel_name': 'RHEL Server Debuginfo (v.6 for x86_64)', 'channel_label': 'rhel-x86_64-server-6-debuginfo'},
{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}
]]),
('channel.software.listSystemChannels', [[
{'channel_name': 'RHEL Server Debuginfo (v.6 for x86_64)', 'channel_label': 'rhel-x86_64-server-6-debuginfo'},
{'channel_name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'channel_label': 'rhel-x86_64-server-6'}
]]),
('system.setChildChannels', [1]),
('auth.logout', [1]),
],
'changed': True,
'msg': 'Channel rhel-x86_64-server-6-debuginfo removed'
}
]
]
@pytest.mark.parametrize('patch_ansible_module, testcase', TEST_CASES, indirect=['patch_ansible_module'])
def test_rhn_channel(capfd, mocker, testcase, mock_request):
"""Check 'msg' and 'changed' results"""
with pytest.raises(SystemExit):
rhn_channel.main()
out, err = capfd.readouterr()
results = json.loads(out)
assert results['changed'] == testcase['changed']
assert results['msg'] == testcase['msg']
assert not testcase['calls'] # all calls should have been consumed
| gpl-3.0 |
DataONEorg/d1_python | client_cli/src/d1_cli/impl/command_processor.py | 1 | 20132 | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Process and execute CLI operations."""
import html.entities
import io
import os
import re
import requests
import d1_cli.impl.client
import d1_cli.impl.exceptions
import d1_cli.impl.format_ids
import d1_cli.impl.nodes
import d1_cli.impl.operation_maker
import d1_cli.impl.operation_queue
import d1_cli.impl.session
import d1_cli.impl.util
import d1_common.const
import d1_common.date_time
import d1_common.types.exceptions
import d1_common.url
import d1_common.xml
DEFAULT_PREFIX = ""
DEFAULT_PROMPT = "> "
SOLR_FORMAT_ID_NAME = "formatId"
class CommandProcessor:
def __init__(self):
self._nodes = d1_cli.impl.nodes.Nodes()
self._format_ids = d1_cli.impl.format_ids.FormatIDs()
self._session = d1_cli.impl.session.Session(self._nodes, self._format_ids)
self._session.load(suppress_error=True)
self._object_format_id_cache = None
self._operation_queue = d1_cli.impl.operation_queue.OperationQueue(
self._session
)
self._operation_maker = d1_cli.impl.operation_maker.OperationMaker(
self._session
)
def get_session(self):
return self._session
def get_operation_queue(self):
return self._operation_queue
def get_nodes(self):
return self._nodes
def get_format_ids(self):
return self._format_ids
# -----------------------------------------------------------------------------
# Operations against Coordinating Nodes
# -----------------------------------------------------------------------------
# Read operations.
def ping(self, hosts):
if not len(hosts):
self._ping_base(self._session.get(d1_cli.impl.session.CN_URL_NAME))
self._ping_base(self._session.get(d1_cli.impl.session.MN_URL_NAME))
else:
for host in hosts:
cn_base_url = d1_common.url.makeCNBaseURL(host)
mn_base_url = d1_common.url.makeMNBaseURL(host)
self._ping_base(cn_base_url)
if mn_base_url != cn_base_url:
self._ping_base(mn_base_url)
def search(self, line):
"""CN search."""
if self._session.get(d1_cli.impl.session.QUERY_ENGINE_NAME) == "solr":
return self._search_solr(line)
raise d1_cli.impl.exceptions.InvalidArguments(
"Unsupported query engine: {}".format(
self._session.get(d1_cli.impl.session.QUERY_ENGINE_NAME)
)
)
def list_format_ids(self):
cn_base_url = self._session.get(d1_cli.impl.session.CN_URL_NAME)
self._output(self._format_ids.format(cn_base_url))
def list_nodes(self):
cn_base_url = self._session.get(d1_cli.impl.session.CN_URL_NAME)
self._output(self._nodes.format(cn_base_url))
def resolve(self, pid):
"""Get Object Locations for Object."""
client = d1_cli.impl.client.CLICNClient(
**self._cn_client_connect_params_from_session()
)
object_location_list_pyxb = client.resolve(pid)
for location in object_location_list_pyxb.objectLocation:
d1_cli.impl.util.print_info(location.url)
# Write operations (queued)
def update_access_policy(self, pids):
for pid in pids:
self._queue_update_access_policy(pid)
def update_replication_policy(self, pids):
for pid in pids:
self._queue_update_replication_policy(pid)
# -----------------------------------------------------------------------------
# Operations against Member Nodes
# -----------------------------------------------------------------------------
# Read operations
def science_object_get(self, pid, path):
"""First try the MN set in the session.
Then try to resolve via the CN set in the session.
"""
mn_client = d1_cli.impl.client.CLIMNClient(
**self._mn_client_connect_params_from_session()
)
try:
response = mn_client.get(pid)
except d1_common.types.exceptions.DataONEException:
pass
else:
self._output(response, path)
return
cn_client = d1_cli.impl.client.CLICNClient(
**self._cn_client_connect_params_from_session()
)
object_location_list_pyxb = cn_client.resolve(pid)
for location in object_location_list_pyxb.objectLocation:
try:
params = self._mn_client_connect_params_from_session()
params["base_url"] = location.baseURL
mn_client = d1_cli.impl.client.CLIMNClient(**params)
response = mn_client.get(pid)
except d1_common.types.exceptions.DataONEException:
pass
else:
self._output(response, path)
return
raise d1_cli.impl.exceptions.CLIError("Could not find object: {}".format(pid))
def system_metadata_get(self, pid, path):
sysmeta_pyxb = None
try:
client = d1_cli.impl.client.CLICNClient(
**self._cn_client_connect_params_from_session()
)
sysmeta_pyxb = client.getSystemMetadata(pid)
except d1_common.types.exceptions.DataONEException:
pass
if sysmeta_pyxb is None:
try:
client = d1_cli.impl.client.CLIMNClient(
**self._mn_client_connect_params_from_session()
)
sysmeta_pyxb = client.getSystemMetadata(pid)
except d1_common.types.exceptions.DataONEException:
pass
if sysmeta_pyxb is None:
raise d1_cli.impl.exceptions.CLIError(
"Unable to get System Metadata: {}".format(pid)
)
self._system_metadata_print(sysmeta_pyxb, path)
def log(self, path):
client = d1_cli.impl.client.CLIMNClient(
**self._mn_client_connect_params_from_session()
)
log_pyxb = client.getLogRecords(
fromDate=self._session.get(d1_cli.impl.session.FROM_DATE_NAME),
toDate=self._session.get(d1_cli.impl.session.TO_DATE_NAME),
start=self._session.get(d1_cli.impl.session.START_NAME),
count=self._session.get(d1_cli.impl.session.COUNT_NAME),
)
log_xml = d1_common.xml.serialize_to_xml_str(log_pyxb)
self._output(io.StringIO(log_xml), path)
def list_objects(self, path):
client = d1_cli.impl.client.CLIMNClient(
**self._mn_client_connect_params_from_session()
)
object_list_pyxb = client.listObjects(
fromDate=self._session.get(d1_cli.impl.session.FROM_DATE_NAME),
toDate=self._session.get(d1_cli.impl.session.TO_DATE_NAME),
formatId=self._session.get(d1_cli.impl.session.SEARCH_FORMAT_NAME),
start=self._session.get(d1_cli.impl.session.START_NAME),
count=self._session.get(d1_cli.impl.session.COUNT_NAME),
)
object_list_xml = d1_common.xml.serialize_to_xml_str(object_list_pyxb)
self._output(io.StringIO(object_list_xml), path)
# Write operations (queued)
def science_object_create(self, pid, path, format_id=None):
"""Create a new Science Object on a Member Node."""
self._queue_science_object_create(pid, path, format_id)
def science_object_update(self, pid_old, path, pid_new, format_id=None):
"""Obsolete a Science Object on a Member Node with a different one."""
self._queue_science_object_update(pid_old, path, pid_new, format_id)
def create_package(self, pids):
self._queue_create_package(pids)
def science_object_archive(self, pids):
for pid in pids:
self._queue_science_object_archive(pid)
#
# Private.
#
def _output(self, file_like_object, path=None):
"""Display or save file like object."""
if not path:
self._output_to_display(file_like_object)
else:
self._output_to_file(file_like_object, path)
def _output_to_display(self, file_like_object):
for line in file_like_object:
d1_cli.impl.util.print_info(line.rstrip())
def _output_to_file(self, file_like_object, path):
abs_path = d1_cli.impl.util.os.path.expanduser(path)
if os.path.exists(abs_path):
if not d1_cli.impl.util.confirm(
'You are about to overwrite an existing file at "{}". Continue? '.format(
abs_path
),
default="yes",
):
d1_cli.impl.util.print_info("Cancelled")
if isinstance(file_like_object, requests.Response):
d1_cli.impl.util.copy_requests_stream_to_file(file_like_object, path)
else:
d1_cli.impl.util.copy_file_like_object_to_file(file_like_object, abs_path)
d1_cli.impl.util.print_info("Created file: {}".format(abs_path))
def _pretty(self, xml_doc):
return d1_common.xml.reformat_to_pretty_xml(xml_doc.decode("utf-8"))
def _system_metadata_print(self, sysmeta_pyxb, path=None):
sysmeta_xml = d1_common.xml.serialize_to_xml_str(sysmeta_pyxb)
if path is not None:
path = d1_cli.impl.util.os.path.expanduser(path)
self._output(io.StringIO(sysmeta_xml), path)
def _ping_base(self, base_url):
result = d1_cli.impl.client.CLIBaseClient(base_url).ping()
self._print_ping_result(result, base_url)
def _print_ping_result(self, result, url):
if result:
d1_cli.impl.util.print_info("Responded: {}".format(url))
else:
d1_cli.impl.util.print_error("Did not respond: {}".format(url))
def _search_solr(self, line):
"""Perform a SOLR search."""
try:
query_str = self._create_solr_query(line)
client = d1_cli.impl.client.CLICNClient(
**self._cn_client_connect_params_from_session()
)
object_list_pyxb = client.search(
queryType=d1_common.const.DEFAULT_SEARCH_ENGINE,
query=query_str,
start=self._session.get(d1_cli.impl.session.START_NAME),
rows=self._session.get(d1_cli.impl.session.COUNT_NAME),
)
d1_cli.impl.util.print_info(self._pretty(object_list_pyxb.toxml("utf-8")))
except d1_common.types.exceptions.ServiceFailure as e:
e = "%".join(str(e).splitlines()) # Flatten line
regexp = re.compile(
r"errorCode: (?P<error_code>\d+)%.*%Status code: (?P<status_code>\d+)"
)
result = regexp.search(e)
if (
(result is not None)
and (result.group("error_code") == "500")
and (result.group("status_code") == "400")
): # noqa: E129
result = re.search(
r"<b>description</b> <u>(?P<description>[^<]+)</u>", e
)
msg = re.sub(
"&([^;]+);",
lambda m: chr(html.entities.name2codepoint[m.group(1)]),
result.group("description"),
)
d1_cli.impl.util.print_info("Warning: %s" % msg)
else:
d1_cli.impl.util.print_error("Unexpected error:\n%s" % str(e))
def _create_solr_query(self, line):
"""Actual search - easier to test. """
p0 = ""
if line:
p0 = line.strip()
p1 = self._query_string_to_solr_filter(line)
p2 = self._object_format_to_solr_filter(line)
p3 = self._time_span_to_solr_filter()
result = p0 + p1 + p2 + p3
return result.strip()
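# Example composition (session values are hypothetical): with
# line='abstract:water', a session query string of '*:*' (dropped when a
# query line is given), a search format of 'text/csv' and no date range set,
# the parts combine to:
#   'abstract:water formatId:text/csv dateModified:[* TO *]'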
def _query_string_to_solr_filter(self, line):
query = self._session.get(d1_cli.impl.session.QUERY_STRING_NAME)
if not query or query == "" or (query == "*:*" and len(line) > 0):
return ""
else:
return " " + query
def _time_span_to_solr_filter(self):
fromdate = self._session.get(d1_cli.impl.session.FROM_DATE_NAME)
todate = self._session.get(d1_cli.impl.session.TO_DATE_NAME)
return " dateModified:[{} TO {}]".format(
d1_common.date_time.http_datetime_str_from_dt(fromdate)
if fromdate
else "*",
d1_common.date_time.http_datetime_str_from_dt(todate) if todate else "*",
)
def _object_format_to_solr_filter(self, line):
search_format_id = self._session.get(d1_cli.impl.session.SEARCH_FORMAT_NAME)
if not search_format_id or search_format_id == "":
return ""
else:
if line.find(SOLR_FORMAT_ID_NAME) >= 0:
d1_cli.impl.util.print_warn(
"Using query format restriction instead: {}".format(
search_format_id
)
)
return ""
else:
return " %s:%s" % (SOLR_FORMAT_ID_NAME, search_format_id)
def _mn_client_connect_params_from_session(self):
return self._mn_cn_client_connect_params_from_session(
d1_cli.impl.session.MN_URL_NAME
)
def _cn_client_connect_params_from_session(self):
return self._mn_cn_client_connect_params_from_session(
d1_cli.impl.session.CN_URL_NAME
)
def _mn_cn_client_connect_params_from_session(self, url_name):
anonymous = self._session.get(d1_cli.impl.session.ANONYMOUS_NAME)
return {
"base_url": self._session.get(url_name),
"cert_pem_path": self._session.get(d1_cli.impl.session.CERT_FILENAME_NAME)
if not anonymous
else None,
"cert_key_path": self._session.get(d1_cli.impl.session.KEY_FILENAME_NAME)
if not anonymous
else None,
}
#
# Queuing of write operations
#
def _queue_science_object_create(self, pid, path, format_id):
create_operation = self._operation_maker.create(pid, path, format_id)
self._operation_queue.append(create_operation)
def _queue_science_object_update(self, pid_old, path, pid_new, format_id):
update_operation = self._operation_maker.update(
pid_old, path, pid_new, format_id
)
self._operation_queue.append(update_operation)
def _queue_create_package(self, pids):
archive_operation = self._operation_maker.create_package(pids)
self._operation_queue.append(archive_operation)
def _queue_science_object_archive(self, pid):
archive_operation = self._operation_maker.archive(pid)
self._operation_queue.append(archive_operation)
def _queue_update_access_policy(self, pid):
update_access_policy_operation = self._operation_maker.update_access_policy(pid)
self._operation_queue.append(update_access_policy_operation)
def _queue_update_replication_policy(self, pid):
update_replication_policy_operation = self._operation_maker.update_replication_policy(
pid
)
self._operation_queue.append(update_replication_policy_operation)
# def get_object_by_pid(session, pid, filename=None, resolve=True):
# """ Create a mnclient and look for the object. If the object is not found,
# simply return a None, don't throw an exception. If found, return the
# filename.
# """
# if session is None:
# raise exceptions.InvalidArguments(u'Missing session')
# if pid is None:
# raise exceptions.InvalidArguments(u'Missing pid')
# # Create member node client and try to get the object.
# mn_client = CLIMNClient(session)
# try:
# response = mn_client.get(pid)
# if response is not None:
# fname = _get_fname(filename)
# util.output(response, fname, session.is_verbose())
# return fname
# except d1_common.types.exceptions.DataONEException as e:
# if e.errorCode != 404:
# raise exceptions.CLIError(
# u'Unable to get resolve: {0}\n{1}'.format(pid, e.friendly_format()))
# if resolve:
# cn_client = CLICNClient(session)
# object_location_list = None
# try:
# object_location_list = cn_client.resolve(pid)
# if ((object_location_list is not None)
# and (len(object_location_list.objectLocation) > 0)):
# baseUrl = object_location_list.objectLocation[0].baseURL
# # If there is an object, go get it.
# mn_client = CLIMNClient(session, mn_url=baseUrl)
# response = mn_client.get(pid)
# if response is not None:
# fname = _get_fname(filename)
# util.output(response, os.path.expanduser(fname))
# return fname
# except d1_common.types.exceptions.DataONEException as e:
# if e.errorCode != 404:
# raise exceptions.CLIError(
# u'Unable to get resolve: {0}\n{1}'.format(pid, e.friendly_format()))
# # Nope, didn't find anything
# return None
#
#
#
#
# def get_baseUrl(session, nodeId):
# """ Get the base url of the given node id.
# """
# cn_client = CLICNClient(session)
# try:
# nodes = cn_client.listNodes()
# for node in list(nodes.node):
# if node.identifier.value() == nodeId:
# return node.baseURL
# except (d1_common.types.exceptions.ServiceFailure) as e:
# util.print_error("Unable to get node list.")
# return None
#
#
# def get_sys_meta_by_pid(session, pid, search_mn = False):
# """ Get the system metadata object for this particular pid.
# """
# if not session:
# raise exceptions.InvalidArguments(u'Missing session')
# if not pid:
# raise exceptions.InvalidArguments(u'Missing pid')
#
# sys_meta = None
# try:
# cn_client = CLICNClient(session)
# obsolete = True;
# while obsolete:
# obsolete = False;
# sys_meta = cn_client.getSystemMetadata(pid)
# if not sys_meta:
# return None
# if sys_meta.obsoletedBy:
# msg = (u'Object "%s" has been obsoleted by "%s". '
# + u'Would you rather use that?') % (pid, sys_meta.obsoletedBy)
# if not util.confirm(msg):
# break;
# pid = sys_meta.obsoletedBy
# obsolete = True
# return sys_meta
# except d1_common.types.exceptions.DataONEException as e:
# if e.errorCode != 404:
# raise exceptions.CLIError(
# u'Unable to get system metadata for: {0}\n{1}'.format(pid, e.friendly_format()))
# # Search the member node?
# if not sys_meta and (search_mn is not None) and search_mn:
# try:
# mn_client = CLIMNClient(session)
# obsolete = True;
# while obsolete:
# obsolete = False;
# sys_meta = mn_client.getSystemMetadata(pid)
# if not sys_meta:
# return None
# if sys_meta.obsoletedBy:
# msg = (u'Object "%s" has been obsoleted by "%s". '
# + u'Would you rather use that?') % (pid, sys_meta.obsoletedBy)
# if not util.confirm(msg):
# break;
# pid = sys_meta.obsoletedBy
# obsolete = True
# return sys_meta
# except d1_common.types.exceptions.DataONEException as e:
# if e.errorCode != 404:
# raise exceptions.CLIError(
# u'Unable to get system metadata for: {0}\n{1}'.format(pid, e.friendly_format()))
#
# return sys_meta
| apache-2.0 |
cristiana214/cristianachavez214-cristianachavez | python/src/Lib/shlex.py | 306 | 11137 | # -*- coding: iso-8859-1 -*-
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
import os.path
import sys
from collections import deque
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
__all__ = ["shlex", "split"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None, posix=False):
if isinstance(instream, basestring):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
if self.posix:
self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
self.source = None
if self.debug:
print 'shlex: reading from %s, line %d' \
% (self.instream, self.lineno)
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
if self.debug >= 1:
print "shlex: pushing token " + repr(tok)
self.pushback.appendleft(tok)
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if isinstance(newstream, basestring):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print 'shlex: pushing to file %s' % (self.infile,)
else:
print 'shlex: pushing to stream %s' % (self.instream,)
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
if self.debug:
print 'shlex: popping to %s, line %d' \
% (self.instream, self.lineno)
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
if self.debug >= 1:
print "shlex: popping token " + repr(tok)
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
if self.debug >= 1:
if raw != self.eof:
print "shlex: token=" + repr(raw)
else:
print "shlex: token=EOF"
return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print "shlex: in state", repr(self.state), \
"I see character:", repr(nextchar)
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in whitespace state"
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
if self.debug >= 2:
print "shlex: I see EOF in quotes state"
# XXX what error should be raised here?
raise ValueError, "No closing quotation"
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and \
self.state in self.escapedquotes:
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2:
print "shlex: I see EOF in escape state"
# XXX what error should be raised here?
raise ValueError, "No escaped character"
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in word state"
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or nextchar in self.quotes \
or self.whitespace_split:
self.token = self.token + nextchar
else:
self.pushback.appendleft(nextchar)
if self.debug >= 2:
print "shlex: I see punctuation in word state"
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if self.debug > 1:
if result:
print "shlex: raw token=" + repr(result)
else:
print "shlex: raw token=EOF"
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if isinstance(self.infile, basestring) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
def __iter__(self):
return self
def next(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
def split(s, comments=False, posix=True):
lex = shlex(s, posix=posix)
lex.whitespace_split = True
if not comments:
lex.commenters = ''
return list(lex)
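# Illustrative behavior of split() (values are examples, not from the
# module's own tests): in POSIX mode, quotes group words and are stripped:
#   split('ls -l "My Documents"') -> ['ls', '-l', 'My Documents']
# and with comments=True, everything after an unquoted '#' is discarded.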
if __name__ == '__main__':
if len(sys.argv) == 1:
lexer = shlex()
else:
file = sys.argv[1]
lexer = shlex(open(file), file)
while 1:
tt = lexer.get_token()
if tt:
print "Token: " + repr(tt)
else:
break
| apache-2.0 |
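A quick usage sketch for the split() helper near the end of the listing above; a minimal example, assuming a Python 2 interpreter (the module uses print statements and the 'raise E, msg' form):
# Hypothetical session exercising shlex.split() (Python 2).
import shlex
print shlex.split("cp 'my file.txt' /tmp")           # -> ['cp', 'my file.txt', '/tmp']
print shlex.split("ls -l # note")                    # -> ['ls', '-l', '#', 'note']
print shlex.split("ls -l # note", comments=True)     # -> ['ls', '-l']
# With comments=False (the default) split() clears lex.commenters, so '#'
# is treated as ordinary word text instead of starting a comment.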
zhwsh00/DirectFire-android | cocos2d-2.1beta3-x-2.1.0/cocos2dx/platform/third_party/marmalade/freetype/src/tools/docmaker/tohtml.py | 395 | 18731 | # ToHTML (c) 2002, 2003, 2005, 2006, 2007, 2008
# David Turner <david@freetype.org>
from sources import *
from content import *
from formatter import *
import time
# The following defines the HTML header used by all generated pages.
html_header_1 = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>\
"""
html_header_2 = """\
API Reference</title>
<style type="text/css">
body { font-family: Verdana, Geneva, Arial, Helvetica, serif;
color: #000000;
background: #FFFFFF; }
p { text-align: justify; }
h1 { text-align: center; }
li { text-align: justify; }
td { padding: 0 0.5em 0 0.5em; }
td.left { padding: 0 0.5em 0 0.5em;
text-align: left; }
a:link { color: #0000EF; }
a:visited { color: #51188E; }
a:hover { color: #FF0000; }
span.keyword { font-family: monospace;
text-align: left;
white-space: pre;
color: darkblue; }
pre.colored { color: blue; }
ul.empty { list-style-type: none; }
</style>
</head>
<body>
"""
html_header_3 = """
<table align=center><tr><td><font size=-1>[<a href="\
"""
html_header_3i = """
<table align=center><tr><td width="100%"></td>
<td><font size=-1>[<a href="\
"""
html_header_4 = """\
">Index</a>]</font></td>
<td width="100%"></td>
<td><font size=-1>[<a href="\
"""
html_header_5 = """\
">TOC</a>]</font></td></tr></table>
<center><h1>\
"""
html_header_5t = """\
">Index</a>]</font></td>
<td width="100%"></td></tr></table>
<center><h1>\
"""
html_header_6 = """\
API Reference</h1></center>
"""
# The HTML footer used by all generated pages.
html_footer = """\
</body>
</html>\
"""
# The header and footer used for each section.
section_title_header = "<center><h1>"
section_title_footer = "</h1></center>"
# The header and footer used for code segments.
code_header = '<pre class="colored">'
code_footer = '</pre>'
# Paragraph header and footer.
para_header = "<p>"
para_footer = "</p>"
# Block header and footer.
block_header = '<table align=center width="75%"><tr><td>'
block_footer_start = """\
</td></tr></table>
<hr width="75%">
<table align=center width="75%"><tr><td><font size=-2>[<a href="\
"""
block_footer_middle = """\
">Index</a>]</font></td>
<td width="100%"></td>
<td><font size=-2>[<a href="\
"""
block_footer_end = """\
">TOC</a>]</font></td></tr></table>
"""
# Description header/footer.
description_header = '<table align=center width="87%"><tr><td>'
description_footer = "</td></tr></table><br>"
# Marker header/inter/footer combination.
marker_header = '<table align=center width="87%" cellpadding=5><tr bgcolor="#EEEEFF"><td><em><b>'
marker_inter = "</b></em></td></tr><tr><td>"
marker_footer = "</td></tr></table>"
# Header location header/footer.
header_location_header = '<table align=center width="87%"><tr><td>'
header_location_footer = "</td></tr></table><br>"
# Source code extracts header/footer.
source_header = '<table align=center width="87%"><tr bgcolor="#D6E8FF"><td><pre>\n'
source_footer = "\n</pre></table><br>"
# Chapter header/inter/footer.
chapter_header = '<br><table align=center width="75%"><tr><td><h2>'
chapter_inter = '</h2><ul class="empty"><li>'
chapter_footer = '</li></ul></td></tr></table>'
# Index footer.
index_footer_start = """\
<hr>
<table><tr><td width="100%"></td>
<td><font size=-2>[<a href="\
"""
index_footer_end = """\
">TOC</a>]</font></td></tr></table>
"""
# TOC footer.
toc_footer_start = """\
<hr>
<table><tr><td><font size=-2>[<a href="\
"""
toc_footer_end = """\
">Index</a>]</font></td>
<td width="100%"></td>
</tr></table>
"""
# source language keyword coloration/styling
keyword_prefix = '<span class="keyword">'
keyword_suffix = '</span>'
section_synopsis_header = '<h2>Synopsis</h2>'
section_synopsis_footer = ''
# Translate a single line of source to HTML.  This will convert
# a "<" into "&lt;", ">" into "&gt;", etc.
def html_quote( line ):
    result = string.replace( line,   "&", "&amp;" )
    result = string.replace( result, "<", "&lt;" )
    result = string.replace( result, ">", "&gt;" )
    return result
# same as 'html_quote', but ignores left and right brackets
def html_quote0( line ):
    return string.replace( line, "&", "&amp;" )
def dump_html_code( lines, prefix = "" ):
    # clean the last empty lines
    l = len( lines )
    while l > 0 and string.strip( lines[l - 1] ) == "":
        l = l - 1
    # The code footer should be directly appended to the last code
    # line to avoid an additional blank line.
    print prefix + code_header,
    for line in lines[0 : l]:
        print '\n' + prefix + html_quote( line ),
    print prefix + code_footer,
class HtmlFormatter( Formatter ):
def __init__( self, processor, project_title, file_prefix ):
Formatter.__init__( self, processor )
global html_header_1, html_header_2, html_header_3
global html_header_4, html_header_5, html_footer
if file_prefix:
file_prefix = file_prefix + "-"
else:
file_prefix = ""
self.headers = processor.headers
self.project_title = project_title
self.file_prefix = file_prefix
self.html_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3 + file_prefix + "index.html" + \
html_header_4 + file_prefix + "toc.html" + \
html_header_5 + project_title + \
html_header_6
self.html_index_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3i + file_prefix + "toc.html" + \
html_header_5 + project_title + \
html_header_6
self.html_toc_header = html_header_1 + project_title + \
html_header_2 + \
html_header_3 + file_prefix + "index.html" + \
html_header_5t + project_title + \
html_header_6
self.html_footer = "<center><font size=""-2"">generated on " + \
time.asctime( time.localtime( time.time() ) ) + \
"</font></center>" + html_footer
self.columns = 3
def make_section_url( self, section ):
return self.file_prefix + section.name + ".html"
def make_block_url( self, block ):
return self.make_section_url( block.section ) + "#" + block.name
def make_html_words( self, words ):
""" convert a series of simple words into some HTML text """
line = ""
if words:
line = html_quote( words[0] )
for w in words[1:]:
line = line + " " + html_quote( w )
return line
def make_html_word( self, word ):
"""analyze a simple word to detect cross-references and styling"""
# look for cross-references
m = re_crossref.match( word )
if m:
try:
name = m.group( 1 )
rest = m.group( 2 )
block = self.identifiers[name]
url = self.make_block_url( block )
return '<a href="' + url + '">' + name + '</a>' + rest
except:
# we detected a cross-reference to an unknown item
sys.stderr.write( \
"WARNING: undefined cross reference '" + name + "'.\n" )
return '?' + name + '?' + rest
# look for italics and bolds
m = re_italic.match( word )
if m:
name = m.group( 1 )
rest = m.group( 3 )
return '<i>' + name + '</i>' + rest
m = re_bold.match( word )
if m:
name = m.group( 1 )
rest = m.group( 3 )
return '<b>' + name + '</b>' + rest
return html_quote( word )
def make_html_para( self, words ):
""" convert words of a paragraph into tagged HTML text, handle xrefs """
line = ""
if words:
line = self.make_html_word( words[0] )
for word in words[1:]:
line = line + " " + self.make_html_word( word )
        # convert `...' quotations into real left and right single quotes
        line = re.sub( r"(^|\W)`(.*?)'(\W|$)", \
                       r'\1&lsquo;\2&rsquo;\3', \
                       line )
        # convert tilde into non-breakable space
        line = string.replace( line, "~", "&nbsp;" )
return para_header + line + para_footer
def make_html_code( self, lines ):
""" convert a code sequence to HTML """
line = code_header + '\n'
for l in lines:
line = line + html_quote( l ) + '\n'
return line + code_footer
def make_html_items( self, items ):
""" convert a field's content into some valid HTML """
lines = []
for item in items:
if item.lines:
lines.append( self.make_html_code( item.lines ) )
else:
lines.append( self.make_html_para( item.words ) )
return string.join( lines, '\n' )
def print_html_items( self, items ):
print self.make_html_items( items )
def print_html_field( self, field ):
if field.name:
print "<table><tr valign=top><td><b>" + field.name + "</b></td><td>"
print self.make_html_items( field.items )
if field.name:
print "</td></tr></table>"
def html_source_quote( self, line, block_name = None ):
result = ""
while line:
m = re_source_crossref.match( line )
if m:
name = m.group( 2 )
prefix = html_quote( m.group( 1 ) )
length = len( m.group( 0 ) )
if name == block_name:
# this is the current block name, if any
result = result + prefix + '<b>' + name + '</b>'
elif re_source_keywords.match( name ):
# this is a C keyword
result = result + prefix + keyword_prefix + name + keyword_suffix
elif self.identifiers.has_key( name ):
# this is a known identifier
block = self.identifiers[name]
result = result + prefix + '<a href="' + \
self.make_block_url( block ) + '">' + name + '</a>'
else:
result = result + html_quote( line[:length] )
line = line[length:]
else:
result = result + html_quote( line )
line = []
return result
def print_html_field_list( self, fields ):
print "<p></p>"
print "<table cellpadding=3 border=0>"
for field in fields:
if len( field.name ) > 22:
print "<tr valign=top><td colspan=0><b>" + field.name + "</b></td></tr>"
print "<tr valign=top><td></td><td>"
else:
print "<tr valign=top><td><b>" + field.name + "</b></td><td>"
self.print_html_items( field.items )
print "</td></tr>"
print "</table>"
def print_html_markup( self, markup ):
table_fields = []
for field in markup.fields:
if field.name:
# we begin a new series of field or value definitions, we
# will record them in the 'table_fields' list before outputting
# all of them as a single table
#
table_fields.append( field )
else:
if table_fields:
self.print_html_field_list( table_fields )
table_fields = []
self.print_html_items( field.items )
if table_fields:
self.print_html_field_list( table_fields )
#
# Formatting the index
#
def index_enter( self ):
print self.html_index_header
self.index_items = {}
def index_name_enter( self, name ):
block = self.identifiers[name]
url = self.make_block_url( block )
self.index_items[name] = url
def index_exit( self ):
# block_index already contains the sorted list of index names
count = len( self.block_index )
rows = ( count + self.columns - 1 ) / self.columns
print "<table align=center border=0 cellpadding=0 cellspacing=0>"
for r in range( rows ):
line = "<tr>"
for c in range( self.columns ):
i = r + c * rows
if i < count:
bname = self.block_index[r + c * rows]
url = self.index_items[bname]
line = line + '<td><a href="' + url + '">' + bname + '</a></td>'
else:
line = line + '<td></td>'
line = line + "</tr>"
print line
print "</table>"
print index_footer_start + \
self.file_prefix + "toc.html" + \
index_footer_end
print self.html_footer
self.index_items = {}
def index_dump( self, index_filename = None ):
if index_filename == None:
index_filename = self.file_prefix + "index.html"
Formatter.index_dump( self, index_filename )
#
# Formatting the table of content
#
def toc_enter( self ):
print self.html_toc_header
print "<center><h1>Table of Contents</h1></center>"
def toc_chapter_enter( self, chapter ):
print chapter_header + string.join( chapter.title ) + chapter_inter
print "<table cellpadding=5>"
def toc_section_enter( self, section ):
print '<tr valign=top><td class="left">'
print '<a href="' + self.make_section_url( section ) + '">' + \
section.title + '</a></td><td>'
print self.make_html_para( section.abstract )
def toc_section_exit( self, section ):
print "</td></tr>"
def toc_chapter_exit( self, chapter ):
print "</table>"
print chapter_footer
def toc_index( self, index_filename ):
print chapter_header + \
'<a href="' + index_filename + '">Global Index</a>' + \
chapter_inter + chapter_footer
def toc_exit( self ):
print toc_footer_start + \
self.file_prefix + "index.html" + \
toc_footer_end
print self.html_footer
def toc_dump( self, toc_filename = None, index_filename = None ):
if toc_filename == None:
toc_filename = self.file_prefix + "toc.html"
if index_filename == None:
index_filename = self.file_prefix + "index.html"
Formatter.toc_dump( self, toc_filename, index_filename )
#
# Formatting sections
#
def section_enter( self, section ):
print self.html_header
print section_title_header
print section.title
print section_title_footer
maxwidth = 0
for b in section.blocks.values():
if len( b.name ) > maxwidth:
maxwidth = len( b.name )
width = 70 # XXX magic number
        if maxwidth != 0:
# print section synopsis
print section_synopsis_header
print "<table align=center cellspacing=5 cellpadding=0 border=0>"
columns = width / maxwidth
if columns < 1:
columns = 1
count = len( section.block_names )
rows = ( count + columns - 1 ) / columns
for r in range( rows ):
line = "<tr>"
for c in range( columns ):
i = r + c * rows
line = line + '<td></td><td>'
if i < count:
name = section.block_names[i]
line = line + '<a href="#' + name + '">' + name + '</a>'
line = line + '</td>'
line = line + "</tr>"
print line
print "</table><br><br>"
print section_synopsis_footer
print description_header
print self.make_html_items( section.description )
print description_footer
def block_enter( self, block ):
print block_header
# place html anchor if needed
if block.name:
print '<h4><a name="' + block.name + '">' + block.name + '</a></h4>'
# dump the block C source lines now
if block.code:
header = ''
for f in self.headers.keys():
if block.source.filename.find( f ) >= 0:
header = self.headers[f] + ' (' + f + ')'
                    break
# if not header:
# sys.stderr.write( \
# 'WARNING: No header macro for ' + block.source.filename + '.\n' )
if header:
print header_location_header
print 'Defined in ' + header + '.'
print header_location_footer
print source_header
for l in block.code:
print self.html_source_quote( l, block.name )
print source_footer
def markup_enter( self, markup, block ):
if markup.tag == "description":
print description_header
else:
print marker_header + markup.tag + marker_inter
self.print_html_markup( markup )
def markup_exit( self, markup, block ):
if markup.tag == "description":
print description_footer
else:
print marker_footer
def block_exit( self, block ):
print block_footer_start + self.file_prefix + "index.html" + \
block_footer_middle + self.file_prefix + "toc.html" + \
block_footer_end
def section_exit( self, section ):
print html_footer
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section, self.file_prefix + section.name + '.html' )
# eof
| mit |
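To make the escaping order in html_quote() above concrete, a small sketch (Python 2, reusing the functions defined in the listing); the ampersand must be rewritten first, or the entities produced for "<" and ">" would themselves be double-escaped:
print html_quote( "FT_Vector* v = <ptr> & mask" )
# -> FT_Vector* v = &lt;ptr&gt; &amp; mask
print html_quote0( "a & b < c" )
# -> a &amp; b < c   (brackets deliberately left alone)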
Inspq/ansible | lib/ansible/modules/system/selinux_permissive.py | 69 | 4355 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Michael Scherer <misc@zarb.org>
# inspired by code of github.com/dandiker/
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: selinux_permissive
short_description: Change permissive domain in SELinux policy
description:
    - Add and remove a domain from the list of permissive domains.
version_added: "2.0"
options:
domain:
description:
- "the domain that will be added or removed from the list of permissive domains"
required: true
permissive:
description:
- "indicate if the domain should or should not be set as permissive"
required: true
choices: [ 'True', 'False' ]
no_reload:
description:
- "automatically reload the policy after a change"
- "default is set to 'false' as that's what most people would want after changing one domain"
- "Note that this doesn't work on older version of the library (example EL 6), the module will silently ignore it in this case"
required: false
default: False
choices: [ 'True', 'False' ]
store:
description:
- "name of the SELinux policy store to use"
required: false
default: null
notes:
- Requires a version of SELinux recent enough ( ie EL 6 or newer )
requirements: [ policycoreutils-python ]
author: Michael Scherer <misc@zarb.org>
'''
EXAMPLES = '''
- selinux_permissive:
name: httpd_t
permissive: true
'''
HAVE_SEOBJECT = False
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
pass
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
def main():
module = AnsibleModule(
argument_spec=dict(
domain=dict(aliases=['name'], required=True),
store=dict(required=False, default=''),
permissive=dict(type='bool', required=True),
no_reload=dict(type='bool', required=False, default=False),
),
supports_check_mode=True
)
# global vars
changed = False
store = module.params['store']
permissive = module.params['permissive']
domain = module.params['domain']
no_reload = module.params['no_reload']
if not HAVE_SEOBJECT:
module.fail_json(changed=False, msg="policycoreutils-python required for this module")
try:
permissive_domains = seobject.permissiveRecords(store)
except ValueError:
e = get_exception()
module.fail_json(domain=domain, msg=str(e))
# not supported on EL 6
if 'set_reload' in dir(permissive_domains):
permissive_domains.set_reload(not no_reload)
try:
all_domains = permissive_domains.get_all()
except ValueError:
e = get_exception()
module.fail_json(domain=domain, msg=str(e))
if permissive:
if domain not in all_domains:
if not module.check_mode:
try:
permissive_domains.add(domain)
except ValueError:
e = get_exception()
module.fail_json(domain=domain, msg=str(e))
changed = True
else:
if domain in all_domains:
if not module.check_mode:
try:
permissive_domains.delete(domain)
except ValueError:
e = get_exception()
module.fail_json(domain=domain, msg=str(e))
changed = True
module.exit_json(changed=changed, store=store,
permissive=permissive, domain=domain)
if __name__ == '__main__':
main()
| gpl-3.0 |
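For reference, roughly what the module does for permissive: true when driven by hand rather than through Ansible; a sketch assuming policycoreutils-python is installed, using the same seobject calls as the module above:
import seobject
records = seobject.permissiveRecords('')   # '' selects the default policy store
if 'httpd_t' not in records.get_all():
    records.add('httpd_t')                 # the module would report changed=True here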
aforalee/keystone | keystone/server/eventlet.py | 9 | 5534 |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
import oslo_i18n
from oslo_service import service
from oslo_service import systemd
import pbr.version
# NOTE(dstanek): i18n.enable_lazy() must be called before
# keystone.i18n._() is called to ensure it has the desired lazy lookup
# behavior. This includes cases, like keystone.exceptions, where
# keystone.i18n._() is called at import time.
oslo_i18n.enable_lazy()
from keystone.common import environment
from keystone.common import utils
from keystone import config
from keystone.i18n import _
from keystone.server import common
from keystone import service as keystone_service
CONF = cfg.CONF
class ServerWrapper(object):
"""Wraps a Server with some launching info & capabilities."""
def __init__(self, server, workers):
self.server = server
self.workers = workers
def launch_with(self, launcher):
self.server.listen()
if self.workers > 1:
# Use multi-process launcher
launcher.launch_service(self.server, self.workers)
else:
# Use single process launcher
launcher.launch_service(self.server)
def create_server(conf, name, host, port, workers):
app = keystone_service.loadapp('config:%s' % conf, name)
server = environment.Server(app, host=host, port=port,
keepalive=CONF.eventlet_server.tcp_keepalive,
keepidle=CONF.eventlet_server.tcp_keepidle)
if CONF.eventlet_server_ssl.enable:
server.set_ssl(CONF.eventlet_server_ssl.certfile,
CONF.eventlet_server_ssl.keyfile,
CONF.eventlet_server_ssl.ca_certs,
CONF.eventlet_server_ssl.cert_required)
return name, ServerWrapper(server, workers)
def serve(*servers):
logging.warning(_('Running keystone via eventlet is deprecated as of Kilo '
'in favor of running in a WSGI server (e.g. mod_wsgi). '
'Support for keystone under eventlet will be removed in '
'the "M"-Release.'))
if max([server[1].workers for server in servers]) > 1:
launcher = service.ProcessLauncher(CONF)
else:
launcher = service.ServiceLauncher(CONF)
for name, server in servers:
try:
server.launch_with(launcher)
except socket.error:
logging.exception(_('Failed to start the %(name)s server') % {
'name': name})
raise
# notify calling process we are ready to serve
systemd.notify_once()
for name, server in servers:
launcher.wait()
def _get_workers(worker_type_config_opt):
# Get the value from config, if the config value is None (not set), return
# the number of cpus with a minimum of 2.
worker_count = CONF.eventlet_server.get(worker_type_config_opt)
if not worker_count:
worker_count = max(2, processutils.get_worker_count())
return worker_count
def configure_threading():
monkeypatch_thread = not CONF.standard_threads
pydev_debug_url = utils.setup_remote_pydev_debug()
if pydev_debug_url:
# in order to work around errors caused by monkey patching we have to
# set the thread to False. An explanation is here:
# http://lists.openstack.org/pipermail/openstack-dev/2012-August/
# 000794.html
monkeypatch_thread = False
environment.use_eventlet(monkeypatch_thread)
def run(possible_topdir):
dev_conf = os.path.join(possible_topdir,
'etc',
'keystone.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
common.configure(
version=pbr.version.VersionInfo('keystone').version_string(),
config_files=config_files,
pre_setup_logging_fn=configure_threading)
paste_config = config.find_paste_config()
def create_servers():
admin_worker_count = _get_workers('admin_workers')
public_worker_count = _get_workers('public_workers')
servers = []
servers.append(create_server(paste_config,
'admin',
CONF.eventlet_server.admin_bind_host,
CONF.eventlet_server.admin_port,
admin_worker_count))
servers.append(create_server(paste_config,
'main',
CONF.eventlet_server.public_bind_host,
CONF.eventlet_server.public_port,
public_worker_count))
return servers
_unused, servers = common.setup_backends(
startup_application_fn=create_servers)
serve(*servers)
| apache-2.0 |
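The worker-count fallback in _get_workers() above, isolated as a sketch; pick_worker_count() is a hypothetical stand-in for the CONF.eventlet_server lookup:
from oslo_concurrency import processutils
def pick_worker_count(configured):
    # 'configured' stands in for CONF.eventlet_server.admin_workers/public_workers
    return configured or max(2, processutils.get_worker_count())
pick_worker_count(None)   # unset -> CPU count, but never fewer than 2
pick_worker_count(1)      # explicit 1 -> the single-process ServiceLauncher is used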
shorelinedev/aosp_kernel_hammerhead | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms:
if (i['loc'] >= loc):
return (i['name'], i['loc']-loc)
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
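A worked example of the get_sym() scan above: kallsyms is sorted by address, and the loop returns the first symbol whose address is at or above the queried drop location, together with a non-negative offset (table contents are made up):
# kallsyms = [{'loc': 0x1000, 'name': 'kfree_skb'},
#             {'loc': 0x2000, 'name': 'ip_rcv'}]
# get_sym(0x1000) -> ('kfree_skb', 0)
# get_sym(0x1800) -> ('ip_rcv', 0x800)    # 0x2000 - 0x1800
# get_sym(0x2800) -> (None, 0)            # past the last entry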
Metrological/qtwebkit | Tools/QueueStatusServer/model/queuelog.py | 122 | 3843 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from time import time
from datetime import datetime
from google.appengine.ext import db
from model.workitems import WorkItems
from model.activeworkitems import ActiveWorkItems
class QueueLog(db.Model):
date = db.DateTimeProperty()
# duration specifies in seconds the time period these log values apply to.
duration = db.IntegerProperty()
queue_name = db.StringProperty()
bot_ids_seen = db.StringListProperty()
max_patches_waiting = db.IntegerProperty(default=0)
patch_wait_durations = db.ListProperty(int)
patch_process_durations = db.ListProperty(int)
patch_retry_count = db.IntegerProperty(default=0)
status_update_count = db.IntegerProperty(default=0)
@staticmethod
def create_key(queue_name, duration, timestamp):
return "%s-%s-%s" % (queue_name, duration, timestamp)
@classmethod
def get_at(cls, queue_name, duration, timestamp):
timestamp = int(timestamp / duration) * duration
date = datetime.utcfromtimestamp(timestamp)
key = cls.create_key(queue_name, duration, timestamp)
return cls.get_or_create(key, date=date, duration=duration, queue_name=queue_name)
@classmethod
def get_current(cls, queue_name, duration):
return cls.get_at(queue_name, duration, time())
# This is to prevent page requests from generating lots of rows in the database.
@classmethod
def get_or_create(cls, key_name, **kwargs):
return db.run_in_transaction(cls._get_or_create_txn, key_name, **kwargs)
def update_max_patches_waiting(self):
patches_waiting = self._get_patches_waiting(self.queue_name)
if patches_waiting > self.max_patches_waiting:
self.max_patches_waiting = patches_waiting
return True
return False
@classmethod
def _get_or_create_txn(cls, key_name, **kwargs):
entity = cls.get_by_key_name(key_name, parent=kwargs.get('parent'))
if entity is None:
entity = cls(key_name=key_name, **kwargs)
return entity
@classmethod
def _get_patches_waiting(cls, queue_name):
work_items = WorkItems.lookup_by_queue(queue_name)
active_work_items = ActiveWorkItems.lookup_by_queue(queue_name)
return len(set(work_items.item_ids) - set(active_work_items.item_ids))
| lgpl-3.0 |
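The bucketing arithmetic in get_at() above, worked by hand: timestamps are truncated to the start of their duration-sized window before becoming part of the key (queue name and timestamp are illustrative):
duration = 3600                                 # one-hour buckets
timestamp = 1361029000
bucket = int(timestamp / duration) * duration   # -> 1361026800
QueueLog.create_key('commit-queue', duration, bucket)
# -> 'commit-queue-3600-1361026800'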
osvalr/odoo | openerp/service/__init__.py | 380 | 1613 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import common
import db
import model
import report
import wsgi_server
import server
#.apidoc title: RPC Services
""" Classes of this module implement the network protocols that the
OpenERP server uses to communicate with remote clients.
Some classes are mostly utilities, whose API need not be visible to
the average user/developer. Study them only if you are about to
implement an extension to the network protocols, or need to debug some
low-level behavior of the wire.
"""
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zenodo/zenodo | zenodo/modules/deposit/receivers.py | 2 | 2671 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo Deposit module receivers."""
from __future__ import absolute_import, print_function
from flask import current_app
from invenio_sipstore.models import RecordSIP
from zenodo.modules.deposit.tasks import datacite_register
from zenodo.modules.openaire.tasks import openaire_direct_index
from zenodo.modules.sipstore.tasks import archive_sip
def datacite_register_after_publish(sender, action=None, pid=None,
deposit=None):
"""Mind DOI with DataCite after the deposit has been published."""
if action == 'publish' and \
current_app.config['DEPOSIT_DATACITE_MINTING_ENABLED']:
recid_pid, record = deposit.fetch_published()
datacite_register.delay(recid_pid.pid_value, str(record.id))
def openaire_direct_index_after_publish(sender, action=None, pid=None,
deposit=None):
"""Send published record for direct indexing at OpenAIRE."""
if action == 'publish' and \
current_app.config['OPENAIRE_DIRECT_INDEXING_ENABLED']:
_, record = deposit.fetch_published()
openaire_direct_index.delay(record_uuid=str(record.id))
def sipstore_write_files_after_publish(sender, action=None, pid=None,
deposit=None):
"""Send the SIP for archiving."""
if action == 'publish' and \
current_app.config['SIPSTORE_ARCHIVER_WRITING_ENABLED']:
recid_pid, record = deposit.fetch_published()
sip = (
RecordSIP.query
.filter_by(pid_id=recid_pid.id)
.order_by(RecordSIP.created.desc())
.first().sip
)
archive_sip.delay(str(sip.id))
| gpl-2.0 |
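A sketch of how receivers with this (sender, action, pid, deposit) signature get connected; the signal below is a hypothetical blinker signal for illustration, since Zenodo wires the real ones up during application setup rather than in this file:
from blinker import Namespace
_signals = Namespace()
post_action = _signals.signal('deposit-post-action')
post_action.connect(datacite_register_after_publish, weak=False)
# The publish flow would then fire:
#   post_action.send(app, action='publish', pid=pid, deposit=deposit)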
mzizzi/ansible | test/units/playbook/role/test_include_role.py | 54 | 8862 | # (c) 2016, Daniel Miranda <danielkza2@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible.playbook import Play
from ansible.playbook.task import Task
from ansible.vars.manager import VariableManager
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
def flatten_tasks(tasks):
for task in tasks:
if isinstance(task, Task):
yield task
else:
for t in flatten_tasks(task.block):
yield t
class TestIncludeRole(unittest.TestCase):
def setUp(self):
self.loader = DictDataLoader({
'/etc/ansible/roles/l1/tasks/main.yml': """
- shell: echo 'hello world from l1'
- include_role: name=l2
""",
'/etc/ansible/roles/l1/tasks/alt.yml': """
- shell: echo 'hello world from l1 alt'
- include_role: name=l2 tasks_from=alt defaults_from=alt
""",
'/etc/ansible/roles/l1/defaults/main.yml': """
test_variable: l1-main
l1_variable: l1-main
""",
'/etc/ansible/roles/l1/defaults/alt.yml': """
test_variable: l1-alt
l1_variable: l1-alt
""",
'/etc/ansible/roles/l2/tasks/main.yml': """
- shell: echo 'hello world from l2'
- include_role: name=l3
""",
'/etc/ansible/roles/l2/tasks/alt.yml': """
- shell: echo 'hello world from l2 alt'
- include_role: name=l3 tasks_from=alt defaults_from=alt
""",
'/etc/ansible/roles/l2/defaults/main.yml': """
test_variable: l2-main
l2_variable: l2-main
""",
'/etc/ansible/roles/l2/defaults/alt.yml': """
test_variable: l2-alt
l2_variable: l2-alt
""",
'/etc/ansible/roles/l3/tasks/main.yml': """
- shell: echo 'hello world from l3'
""",
'/etc/ansible/roles/l3/tasks/alt.yml': """
- shell: echo 'hello world from l3 alt'
""",
'/etc/ansible/roles/l3/defaults/main.yml': """
test_variable: l3-main
l3_variable: l3-main
""",
'/etc/ansible/roles/l3/defaults/alt.yml': """
test_variable: l3-alt
l3_variable: l3-alt
"""
})
self.var_manager = VariableManager(loader=self.loader)
def tearDown(self):
pass
def get_tasks_vars(self, play, tasks):
for task in flatten_tasks(tasks):
role = task._role
if not role:
continue
yield (role.get_name(),
self.var_manager.get_vars(play=play, task=task))
@patch('ansible.playbook.role.definition.unfrackpath',
mock_unfrackpath_noop)
def test_simple(self):
"""Test one-level include with default tasks and variables"""
play = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[
{'include_role': 'name=l3'}
]
), loader=self.loader, variable_manager=self.var_manager)
tasks = play.compile()
for role, task_vars in self.get_tasks_vars(play, tasks):
self.assertEqual(task_vars.get('l3_variable'), 'l3-main')
self.assertEqual(task_vars.get('test_variable'), 'l3-main')
@patch('ansible.playbook.role.definition.unfrackpath',
mock_unfrackpath_noop)
def test_simple_alt_files(self):
"""Test one-level include with alternative tasks and variables"""
play = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[{'include_role': 'name=l3 tasks_from=alt defaults_from=alt'}]),
loader=self.loader, variable_manager=self.var_manager)
tasks = play.compile()
for role, task_vars in self.get_tasks_vars(play, tasks):
self.assertEqual(task_vars.get('l3_variable'), 'l3-alt')
self.assertEqual(task_vars.get('test_variable'), 'l3-alt')
@patch('ansible.playbook.role.definition.unfrackpath',
mock_unfrackpath_noop)
def test_nested(self):
"""
Test nested includes with default tasks and variables.
Variables from outer roles should be inherited, but overridden in inner
roles.
"""
play = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[
{'include_role': 'name=l1'}
]
), loader=self.loader, variable_manager=self.var_manager)
tasks = play.compile()
for role, task_vars in self.get_tasks_vars(play, tasks):
# Outer-most role must not have variables from inner roles yet
if role == 'l1':
self.assertEqual(task_vars.get('l1_variable'), 'l1-main')
self.assertEqual(task_vars.get('l2_variable'), None)
self.assertEqual(task_vars.get('l3_variable'), None)
self.assertEqual(task_vars.get('test_variable'), 'l1-main')
# Middle role must have variables from outer role, but not inner
elif role == 'l2':
self.assertEqual(task_vars.get('l1_variable'), 'l1-main')
self.assertEqual(task_vars.get('l2_variable'), 'l2-main')
self.assertEqual(task_vars.get('l3_variable'), None)
self.assertEqual(task_vars.get('test_variable'), 'l2-main')
# Inner role must have variables from both outer roles
elif role == 'l3':
self.assertEqual(task_vars.get('l1_variable'), 'l1-main')
self.assertEqual(task_vars.get('l2_variable'), 'l2-main')
self.assertEqual(task_vars.get('l3_variable'), 'l3-main')
self.assertEqual(task_vars.get('test_variable'), 'l3-main')
@patch('ansible.playbook.role.definition.unfrackpath',
mock_unfrackpath_noop)
def test_nested_alt_files(self):
"""
Test nested includes with alternative tasks and variables.
Variables from outer roles should be inherited, but overridden in inner
roles.
"""
play = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[
{'include_role': 'name=l1 tasks_from=alt defaults_from=alt'}
]
), loader=self.loader, variable_manager=self.var_manager)
tasks = play.compile()
for role, task_vars in self.get_tasks_vars(play, tasks):
# Outer-most role must not have variables from inner roles yet
if role == 'l1':
self.assertEqual(task_vars.get('l1_variable'), 'l1-alt')
self.assertEqual(task_vars.get('l2_variable'), None)
self.assertEqual(task_vars.get('l3_variable'), None)
self.assertEqual(task_vars.get('test_variable'), 'l1-alt')
# Middle role must have variables from outer role, but not inner
elif role == 'l2':
self.assertEqual(task_vars.get('l1_variable'), 'l1-alt')
self.assertEqual(task_vars.get('l2_variable'), 'l2-alt')
self.assertEqual(task_vars.get('l3_variable'), None)
self.assertEqual(task_vars.get('test_variable'), 'l2-alt')
# Inner role must have variables from both outer roles
elif role == 'l3':
self.assertEqual(task_vars.get('l1_variable'), 'l1-alt')
self.assertEqual(task_vars.get('l2_variable'), 'l2-alt')
self.assertEqual(task_vars.get('l3_variable'), 'l3-alt')
self.assertEqual(task_vars.get('test_variable'), 'l3-alt')
| gpl-3.0 |
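A self-contained toy showing the depth-first order flatten_tasks() above yields; Task and Block here are stand-in classes (the real helper isinstance-checks ansible.playbook.task.Task):
class Task(object):
    def __init__(self, name):
        self.name = name
class Block(object):
    def __init__(self, block):
        self.block = block
def flatten_tasks(tasks):
    for task in tasks:
        if isinstance(task, Task):
            yield task
        else:
            for t in flatten_tasks(task.block):
                yield t
nested = [Block([Task('a'), Block([Task('b')])]), Task('c')]
print([t.name for t in flatten_tasks(nested)])   # -> ['a', 'b', 'c']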
pepetreshere/odoo | addons/stock/wizard/stock_quantity_history.py | 6 | 1561 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import _, fields, models
from odoo.osv import expression
class StockQuantityHistory(models.TransientModel):
_name = 'stock.quantity.history'
_description = 'Stock Quantity History'
inventory_datetime = fields.Datetime('Inventory at Date',
help="Choose a date to get the inventory at that date",
default=fields.Datetime.now)
def open_at_date(self):
tree_view_id = self.env.ref('stock.view_stock_product_tree').id
form_view_id = self.env.ref('stock.product_form_view_procurement_button').id
domain = [('type', '=', 'product')]
product_id = self.env.context.get('product_id', False)
product_tmpl_id = self.env.context.get('product_tmpl_id', False)
if product_id:
domain = expression.AND([domain, [('id', '=', product_id)]])
elif product_tmpl_id:
domain = expression.AND([domain, [('product_tmpl_id', '=', product_tmpl_id)]])
# We pass `to_date` in the context so that `qty_available` will be computed across
# moves until date.
action = {
'type': 'ir.actions.act_window',
'views': [(tree_view_id, 'tree'), (form_view_id, 'form')],
'view_mode': 'tree,form',
'name': _('Products'),
'res_model': 'product.product',
'domain': domain,
'context': dict(self.env.context, to_date=self.inventory_datetime),
}
return action
| agpl-3.0 |
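How the expression.AND() calls above compose Odoo search domains; a short sketch with an illustrative record id:
from odoo.osv import expression
domain = [('type', '=', 'product')]
domain = expression.AND([domain, [('id', '=', 42)]])
# -> ['&', ('type', '=', 'product'), ('id', '=', 42)]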
chalmers-revere/opendlv.miniature | thirdparty/cxxtest/doc/include_anchors.py | 54 | 2903 | #-------------------------------------------------------------------------
# CxxTest: A lightweight C++ unit testing library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the LGPL License v3
# For more information, see the COPYING file in the top CxxTest directory.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
#-------------------------------------------------------------------------
import re
import sys
import os.path
import os
pat1a = re.compile('include::([a-zA-Z0-9_\.\-/\/]+\/)\.([^\_]+)\_[a-zA-Z0-9]*\.py\[\]')
pat1b = re.compile('include::([a-zA-Z0-9_\.\-/\/]+\/)\.([^\_]+)\_[a-zA-Z0-9]*\.sh\[\]')
pat1c = re.compile('include::([a-zA-Z0-9_\.\-/\/]+\/)\.([^\_]+)\_[a-zA-Z0-9]*\.h\[\]')
pat1d = re.compile('include::([a-zA-Z0-9_\.\-/\/]+\/)\.([^\_]+)\_[a-zA-Z0-9]*\.cpp\[\]')
pat2 = re.compile('([^@]+)@([a-zA-Z0-9]+):')
pat3 = re.compile('([^@]+)@:([a-zA-Z0-9]+)')
processed = set()
def process(dir, root, suffix):
#print "PROCESS ",root, suffix
bname = "%s%s" % (dir, root)
global processed
if bname in processed:
return
#
anchors = {}
anchors[''] = open('%s.%s_.%s' % (dir, root, suffix), 'w')
INPUT = open('%s%s.%s' % (dir, root, suffix), 'r')
for line in INPUT:
m2 = pat2.match(line)
m3 = pat3.match(line)
if m2:
anchor = m2.group(2)
anchors[anchor] = open('%s.%s_%s.%s' % (dir, root, anchor, suffix), 'w')
elif m3:
anchor = m3.group(2)
anchors[anchor].close()
del anchors[anchor]
else:
for anchor in anchors:
os.write(anchors[anchor].fileno(), line)
INPUT.close()
for anchor in anchors:
if anchor != '':
print "ERROR: anchor '%s' did not terminate" % anchor
anchors[anchor].close()
#
processed.add(bname)
for file in sys.argv[1:]:
print "Processing file '%s' ..." % file
INPUT = open(file, 'r')
for line in INPUT:
suffix = None
m = pat1a.match(line)
if m:
suffix = 'py'
#
if suffix is None:
m = pat1b.match(line)
if m:
suffix = 'sh'
#
if suffix is None:
m = pat1c.match(line)
if m:
suffix = 'h'
#
if suffix is None:
m = pat1d.match(line)
if m:
suffix = 'cpp'
#
if not suffix is None:
#print "HERE", line, suffix
fname = m.group(1)+m.group(2)+'.'+suffix
if not os.path.exists(fname):
print line
print "ERROR: file '%s' does not exist!" % fname
sys.exit(1)
process(m.group(1), m.group(2), suffix)
INPUT.close()
| gpl-2.0 |
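An example of the anchor markup the script above extracts (file and anchor names are made up). Given tests/foo.py containing
    setup code      # @anchor1:
    body line 1
    body line 2
    teardown code   # @:anchor1
the script writes tests/.foo_.py (all non-marker lines) and tests/.foo_anchor1.py (just the two body lines), which an include::tests/.foo_anchor1.py[] directive in the asciidoc source then pulls in. Marker lines themselves are never copied, and an anchor left open is reported as unterminated at end of file.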
3dfxmadscientist/CBSS | addons/portal/tests/__init__.py | 177 | 1108 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_portal
checks = [
test_portal,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
 | agpl-3.0 |
fajoy/horizon-example | openstack_dashboard/dashboards/project/volumes/tabs.py | 3 | 1719 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard.api import cinder
from openstack_dashboard.api import nova
class OverviewTab(tabs.Tab):
name = _("Overview")
slug = "overview"
template_name = ("project/volumes/"
"_detail_overview.html")
def get_context_data(self, request):
volume_id = self.tab_group.kwargs['volume_id']
try:
volume = cinder.volume_get(request, volume_id)
for att in volume.attachments:
att['instance'] = nova.server_get(request, att['server_id'])
except:
redirect = reverse('horizon:project:volumes:index')
exceptions.handle(self.request,
_('Unable to retrieve volume details.'),
redirect=redirect)
return {'volume': volume}
class VolumeDetailTabs(tabs.TabGroup):
slug = "volume_details"
tabs = (OverviewTab,)
| apache-2.0 |
procangroup/edx-platform | lms/djangoapps/learner_dashboard/tests/test_programs.py | 5 | 9285 | # -*- coding: utf-8 -*-
"""
Unit tests covering the program listing and detail pages.
"""
import json
import re
from urlparse import urljoin
from uuid import uuid4
import mock
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.urlresolvers import reverse, reverse_lazy
from django.test import override_settings
from openedx.core.djangoapps.catalog.tests.factories import CourseFactory, CourseRunFactory, ProgramFactory
from openedx.core.djangoapps.catalog.tests.mixins import CatalogIntegrationMixin
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory as ModuleStoreCourseFactory
PROGRAMS_UTILS_MODULE = 'openedx.core.djangoapps.programs.utils'
@skip_unless_lms
@override_settings(MKTG_URLS={'ROOT': 'https://www.example.com'})
@mock.patch(PROGRAMS_UTILS_MODULE + '.get_programs')
class TestProgramListing(ProgramsApiConfigMixin, SharedModuleStoreTestCase):
"""Unit tests for the program listing page."""
maxDiff = None
password = 'test'
url = reverse_lazy('program_listing_view')
@classmethod
def setUpClass(cls):
super(TestProgramListing, cls).setUpClass()
cls.course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=unicode(cls.course.id)) # pylint: disable=no-member
course = CourseFactory(course_runs=[course_run])
cls.first_program = ProgramFactory(courses=[course])
cls.second_program = ProgramFactory(courses=[course])
cls.data = sorted([cls.first_program, cls.second_program], key=cls.program_sort_key)
def setUp(self):
super(TestProgramListing, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
@classmethod
def program_sort_key(cls, program):
"""
Helper function used to sort dictionaries representing programs.
"""
return program['title']
def load_serialized_data(self, response, key):
"""
Extract and deserialize serialized data from the response.
"""
pattern = re.compile(r'{key}: (?P<data>\[.*\])'.format(key=key))
match = pattern.search(response.content)
serialized = match.group('data')
return json.loads(serialized)
def assert_dict_contains_subset(self, superset, subset):
"""
Verify that the dict superset contains the dict subset.
Works like assertDictContainsSubset, deprecated since Python 3.2.
See: https://docs.python.org/2.7/library/unittest.html#unittest.TestCase.assertDictContainsSubset.
"""
superset_keys = set(superset.keys())
subset_keys = set(subset.keys())
intersection = {key: superset[key] for key in superset_keys & subset_keys}
self.assertEqual(subset, intersection)
def test_login_required(self, mock_get_programs):
"""
Verify that login is required to access the page.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
self.client.logout()
response = self.client.get(self.url)
self.assertRedirects(
response,
'{}?next={}'.format(reverse('signin_user'), self.url)
)
self.client.login(username=self.user.username, password=self.password)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_404_if_disabled(self, _mock_get_programs):
"""
Verify that the page 404s if disabled.
"""
self.create_programs_config(enabled=False)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_empty_state(self, mock_get_programs):
"""
Verify that the response contains no programs data when no programs are engaged.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
response = self.client.get(self.url)
self.assertContains(response, 'programsData: []')
def test_programs_listed(self, mock_get_programs):
"""
Verify that the response contains accurate programs data when programs are engaged.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
CourseEnrollmentFactory(user=self.user, course_id=self.course.id) # pylint: disable=no-member
response = self.client.get(self.url)
actual = self.load_serialized_data(response, 'programsData')
actual = sorted(actual, key=self.program_sort_key)
for index, actual_program in enumerate(actual):
expected_program = self.data[index]
self.assert_dict_contains_subset(actual_program, expected_program)
def test_program_discovery(self, mock_get_programs):
"""
Verify that a link to a programs marketing page appears in the response.
"""
self.create_programs_config(marketing_path='bar')
mock_get_programs.return_value = self.data
marketing_root = urljoin(settings.MKTG_URLS.get('ROOT'), 'bar').rstrip('/')
response = self.client.get(self.url)
self.assertContains(response, marketing_root)
def test_links_to_detail_pages(self, mock_get_programs):
"""
Verify that links to detail pages are present.
"""
self.create_programs_config()
mock_get_programs.return_value = self.data
CourseEnrollmentFactory(user=self.user, course_id=self.course.id) # pylint: disable=no-member
response = self.client.get(self.url)
actual = self.load_serialized_data(response, 'programsData')
actual = sorted(actual, key=self.program_sort_key)
for index, actual_program in enumerate(actual):
expected_program = self.data[index]
expected_url = reverse('program_details_view', kwargs={'program_uuid': expected_program['uuid']})
self.assertEqual(actual_program['detail_url'], expected_url)
@skip_unless_lms
@mock.patch(PROGRAMS_UTILS_MODULE + '.get_programs')
class TestProgramDetails(ProgramsApiConfigMixin, CatalogIntegrationMixin, SharedModuleStoreTestCase):
"""Unit tests for the program details page."""
program_uuid = str(uuid4())
password = 'test'
url = reverse_lazy('program_details_view', kwargs={'program_uuid': program_uuid})
@classmethod
def setUpClass(cls):
super(TestProgramDetails, cls).setUpClass()
modulestore_course = ModuleStoreCourseFactory()
course_run = CourseRunFactory(key=unicode(modulestore_course.id)) # pylint: disable=no-member
course = CourseFactory(course_runs=[course_run])
cls.data = ProgramFactory(uuid=cls.program_uuid, courses=[course])
def setUp(self):
super(TestProgramDetails, self).setUp()
self.user = UserFactory()
self.client.login(username=self.user.username, password=self.password)
def assert_program_data_present(self, response):
"""Verify that program data is present."""
self.assertContains(response, 'programData')
self.assertContains(response, 'urls')
self.assertContains(response, 'program_listing_url')
self.assertContains(response, self.data['title'])
self.assert_programs_tab_present(response)
def assert_programs_tab_present(self, response):
"""Verify that the programs tab is present in the nav."""
soup = BeautifulSoup(response.content, 'html.parser')
self.assertTrue(
any(soup.find_all('a', class_='tab-nav-link', href=reverse('program_listing_view')))
)
def test_login_required(self, mock_get_programs):
"""
Verify that login is required to access the page.
"""
self.create_programs_config()
catalog_integration = self.create_catalog_integration()
UserFactory(username=catalog_integration.service_username)
mock_get_programs.return_value = self.data
self.client.logout()
response = self.client.get(self.url)
self.assertRedirects(
response,
'{}?next={}'.format(reverse('signin_user'), self.url)
)
self.client.login(username=self.user.username, password=self.password)
response = self.client.get(self.url)
self.assert_program_data_present(response)
def test_404_if_disabled(self, _mock_get_programs):
"""
Verify that the page 404s if disabled.
"""
self.create_programs_config(enabled=False)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
def test_404_if_no_data(self, mock_get_programs):
"""Verify that the page 404s if no program data is found."""
self.create_programs_config()
mock_get_programs.return_value = None
response = self.client.get(self.url)
self.assertEqual(response.status_code, 404)
| agpl-3.0 |
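The load_serialized_data() helper above scrapes JSON that the page bootstraps into an inline script; a worked example of the pattern it builds (the page content is made up):
import json
import re
content = 'window.stuff = { programsData: [{"title": "P1"}] };'
match = re.compile(r'programsData: (?P<data>\[.*\])').search(content)
json.loads(match.group('data'))   # -> [{u'title': u'P1'}]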
LordDamionDevil/Lony | lib/pip/_vendor/requests/packages/urllib3/packages/six.py | 2715 | 30098 | """Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
        Return true if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
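# Illustrative sketch (not part of six): literals built with b() and u() have
# the expected types on both major Python versions.
def _literal_example():
    data = b('abc')  # bytes on Python 3, str on Python 2
    text = u('abc')  # str on Python 3, unicode on Python 2
    assert isinstance(data, binary_type)
    assert isinstance(text, text_type)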
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
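# Illustrative sketch (not part of six): portable exception chaining. On
# Python 3.2+ the KeyError becomes the ValueError's __cause__; on versions
# without `raise ... from` it is simply dropped.
def _raise_from_example():
    try:
        raise_from(ValueError('outer'), KeyError('inner'))
    except ValueError:
        pass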
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
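# Illustrative sketch (not part of six): with_metaclass goes in the bases list
# of a class statement; the temporary class built above replaces itself with a
# real instance of `Meta` when the subclass body is evaluated, so the same
# spelling works on Python 2 and Python 3.
def _with_metaclass_example():
    class Meta(type):
        pass
    class Base(with_metaclass(Meta, object)):
        pass
    assert type(Base) is Meta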
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
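# Illustrative sketch (not part of six): the decorator form rebuilds an
# ordinary class definition with the given metaclass.
def _add_metaclass_example():
    class Meta(type):
        pass
    @add_metaclass(Meta)
    class Widget(object):
        pass
    assert type(Widget) is Meta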
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
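# Illustrative sketch (not part of six): define __str__ returning text and let
# the decorator supply the encoding __str__ plus __unicode__ on Python 2.
def _python_2_unicode_compatible_example():
    @python_2_unicode_compatible
    class Greeting(object):
        def __str__(self):
            return u'h\xe9llo'
    assert text_type(Greeting()) == u'h\xe9llo'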
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
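# Illustrative sketch (not part of six): with the importer registered, the lazy
# moves resolve uniformly on Python 2 and Python 3 when first accessed.
def _moves_example():
    return moves.urllib.parse.urljoin('http://example.com/a/', 'b')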
| gpl-3.0 |
antinucleon/shadowsocks | shadowsocks/crypto/util.py | 1032 | 4287 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import logging
def find_library_nt(name):
    # modified from ctypes.util
    # ctypes.util.find_library just returns the first result it finds,
    # but we want to try them all, because on Windows users may have
    # both 32-bit and 64-bit versions installed
results = []
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.isfile(fname):
results.append(fname)
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.isfile(fname):
results.append(fname)
return results
def find_library(possible_lib_names, search_symbol, library_name):
import ctypes.util
from ctypes import CDLL
paths = []
if type(possible_lib_names) not in (list, tuple):
possible_lib_names = [possible_lib_names]
lib_names = []
for lib_name in possible_lib_names:
lib_names.append(lib_name)
lib_names.append('lib' + lib_name)
for name in lib_names:
if os.name == "nt":
paths.extend(find_library_nt(name))
else:
path = ctypes.util.find_library(name)
if path:
paths.append(path)
if not paths:
# We may get here when find_library fails because, for example,
# the user does not have sufficient privileges to access those
# tools underlying find_library on linux.
import glob
for name in lib_names:
patterns = [
'/usr/local/lib*/lib%s.*' % name,
'/usr/lib*/lib%s.*' % name,
'lib%s.*' % name,
'%s.dll' % name]
for pat in patterns:
files = glob.glob(pat)
if files:
paths.extend(files)
for path in paths:
try:
lib = CDLL(path)
if hasattr(lib, search_symbol):
logging.info('loading %s from %s', library_name, path)
return lib
else:
logging.warn('can\'t find symbol %s in %s', search_symbol,
path)
except Exception:
pass
return None
def run_cipher(cipher, decipher):
from os import urandom
import random
import time
BLOCK_SIZE = 16384
rounds = 1 * 1024
plain = urandom(BLOCK_SIZE * rounds)
results = []
pos = 0
print('test start')
start = time.time()
while pos < len(plain):
l = random.randint(100, 32768)
c = cipher.update(plain[pos:pos + l])
results.append(c)
pos += l
pos = 0
c = b''.join(results)
results = []
while pos < len(plain):
l = random.randint(100, 32768)
results.append(decipher.update(c[pos:pos + l]))
pos += l
end = time.time()
print('speed: %d bytes/s' % (BLOCK_SIZE * rounds / (end - start)))
assert b''.join(results) == plain
def test_find_library():
assert find_library('c', 'strcpy', 'libc') is not None
assert find_library(['c'], 'strcpy', 'libc') is not None
assert find_library(('c',), 'strcpy', 'libc') is not None
assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate',
'libcrypto') is not None
assert find_library('notexist', 'strcpy', 'libnotexist') is None
assert find_library('c', 'symbol_not_exist', 'c') is None
assert find_library(('notexist', 'c', 'crypto', 'eay32'),
'EVP_CipherUpdate', 'libc') is not None
if __name__ == '__main__':
test_find_library()
| apache-2.0 |
deniszgonjanin/moviepy | moviepy/video/io/gif_writers.py | 14 | 9110 | import os
import subprocess as sp
from tqdm import tqdm
from moviepy.config import get_setting
from moviepy.decorators import (requires_duration,use_clip_fps_by_default)
from moviepy.tools import verbose_print, subprocess_call
import numpy as np
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
try:
import imageio
IMAGEIO_FOUND = True
except ImportError:
IMAGEIO_FOUND = False
@requires_duration
@use_clip_fps_by_default
def write_gif_with_tempfiles(clip, filename, fps=None, program= 'ImageMagick',
opt="OptimizeTransparency", fuzz=1, verbose=True,
loop=0, dispose=True, colors=None, tempfiles=False):
""" Write the VideoClip to a GIF file.
Converts a VideoClip into an animated GIF using ImageMagick
    or ffmpeg. Does the same as write_gif (see that function for the full
    docstring), but writes every frame to a temporary file instead of
    keeping them all in RAM. Useful on computers with little RAM.
"""
fileName, fileExtension = os.path.splitext(filename)
tt = np.arange(0,clip.duration, 1.0/fps)
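    # NOTE: this local list shadows the unused `tempfiles` keyword parameter.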
tempfiles = []
verbose_print(verbose, "\n[MoviePy] Building file %s\n"%filename
+40*"-"+"\n")
verbose_print(verbose, "[MoviePy] Generating GIF frames...\n")
total = int(clip.duration*fps)+1
for i, t in tqdm(enumerate(tt), total=total):
name = "%s_GIFTEMP%04d.png"%(fileName, i+1)
tempfiles.append(name)
clip.save_frame(name, t, withmask=True)
delay = int(100.0/fps)
if program == "ImageMagick":
verbose_print(verbose, "[MoviePy] Optimizing GIF with ImageMagick... ")
cmd = [get_setting("IMAGEMAGICK_BINARY"),
'-delay' , '%d'%delay,
"-dispose" ,"%d"%(2 if dispose else 1),
"-loop" , "%d"%loop,
"%s_GIFTEMP*.png"%fileName,
"-coalesce",
"-layers", "%s"%opt,
"-fuzz", "%02d"%fuzz + "%",
]+(["-colors", "%d"%colors] if colors is not None else [])+[
filename]
elif program == "ffmpeg":
cmd = [get_setting("FFMPEG_BINARY"), '-y',
'-f', 'image2', '-r',str(fps),
'-i', fileName+'_GIFTEMP%04d.png',
'-r',str(fps),
filename]
try:
subprocess_call( cmd, verbose = verbose )
verbose_print(verbose, "[MoviePy] GIF %s is ready."%filename)
except (IOError,OSError) as err:
error = ("MoviePy Error: creation of %s failed because "
"of the following error:\n\n%s.\n\n."%(filename, str(err)))
if program == "ImageMagick":
error = error + ("This error can be due to the fact that "
"ImageMagick is not installed on your computer, or "
"(for Windows users) that you didn't specify the "
"path to the ImageMagick binary in file conf.py." )
raise IOError(error)
for f in tempfiles:
os.remove(f)
@requires_duration
@use_clip_fps_by_default
def write_gif(clip, filename, fps=None, program= 'ImageMagick',
opt="OptimizeTransparency", fuzz=1, verbose=True, withmask=True,
loop=0, dispose=True, colors=None):
""" Write the VideoClip to a GIF file, without temporary files.
Converts a VideoClip into an animated GIF using ImageMagick
or ffmpeg.
Parameters
-----------
filename
Name of the resulting gif file.
fps
Number of frames per second (see note below). If it
isn't provided, then the function will look for the clip's
``fps`` attribute (VideoFileClip, for instance, have one).
program
Software to use for the conversion, either 'ImageMagick' or
'ffmpeg'.
opt
      (ImageMagick only) optimization to apply, either
      'OptimizePlus' or 'OptimizeTransparency'.
fuzz
(ImageMagick only) Compresses the GIF by considering that
the colors that are less than fuzz% different are in fact
the same.
Notes
-----
The gif will be playing the clip in real time (you can
only change the frame rate). If you want the gif to be played
slower than the clip you will use ::
>>> # slow down clip 50% and make it a gif
>>> myClip.speedx(0.5).write_gif('myClip.gif')
"""
#
# We use processes chained with pipes.
#
# if program == 'ffmpeg'
# frames --ffmpeg--> gif
#
# if program == 'ImageMagick' and optimize == (None, False)
# frames --ffmpeg--> bmp frames --ImageMagick--> gif
#
#
# if program == 'ImageMagick' and optimize != (None, False)
# frames -ffmpeg-> bmp frames -ImagMag-> gif -ImagMag-> better gif
#
delay= 100.0/fps
if clip.mask is None:
withmask = False
cmd1 = [get_setting("FFMPEG_BINARY"), '-y', '-loglevel', 'error',
'-f', 'rawvideo',
'-vcodec','rawvideo', '-r', "%.02f"%fps,
'-s', "%dx%d"%(clip.w, clip.h),
'-pix_fmt', ('rgba' if withmask else 'rgb24'),
'-i', '-']
popen_params = {"stdout": DEVNULL,
"stderr": DEVNULL,
"stdin": DEVNULL}
if os.name == "nt":
popen_params["creationflags"] = 0x08000000
if program == "ffmpeg":
popen_params["stdin"] = sp.PIPE
popen_params["stdout"] = DEVNULL
proc1 = sp.Popen(cmd1+[ '-pix_fmt', ('rgba' if withmask else 'rgb24'),
'-r', "%.02f"%fps, filename], **popen_params)
else:
popen_params["stdin"] = sp.PIPE
popen_params["stdout"] = sp.PIPE
proc1 = sp.Popen(cmd1+ ['-f', 'image2pipe', '-vcodec', 'bmp', '-'],
**popen_params)
if program == 'ImageMagick':
cmd2 = [get_setting("IMAGEMAGICK_BINARY"), '-delay', "%.02f"%(delay),
"-dispose" ,"%d"%(2 if dispose else 1),
'-loop', '%d'%loop, '-', '-coalesce']
if (opt in [False, None]):
popen_params["stdin"] = proc1.stdout
popen_params["stdout"] = DEVNULL
proc2 = sp.Popen(cmd2+[filename], **popen_params)
else:
popen_params["stdin"] = proc1.stdout
popen_params["stdout"] = sp.PIPE
proc2 = sp.Popen(cmd2+['gif:-'], **popen_params)
if opt:
cmd3 = [get_setting("IMAGEMAGICK_BINARY"), '-', '-layers', opt,
'-fuzz', '%d'%fuzz+'%'
]+(["-colors", "%d"%colors] if colors is not None else [])+[
filename]
popen_params["stdin"] = proc2.stdout
popen_params["stdout"] = DEVNULL
proc3 = sp.Popen(cmd3, **popen_params)
# We send all the frames to the first process
verbose_print(verbose, "\n[MoviePy] >>>> Building file %s\n"%filename)
verbose_print(verbose, "[MoviePy] Generating GIF frames...\n")
try:
for t,frame in clip.iter_frames(fps=fps, progress_bar=True,
with_times=True, dtype="uint8"):
if withmask:
mask = 255 * clip.mask.get_frame(t)
frame = np.dstack([frame, mask]).astype('uint8')
proc1.stdin.write(frame.tostring())
except IOError as err:
error = ("[MoviePy] Error: creation of %s failed because "
"of the following error:\n\n%s.\n\n."%(filename, str(err)))
if program == "ImageMagick":
error = error + ("This can be due to the fact that "
"ImageMagick is not installed on your computer, or "
"(for Windows users) that you didn't specify the "
"path to the ImageMagick binary in file conf.py." )
raise IOError(error)
if program == 'ImageMagick':
verbose_print(verbose, "[MoviePy] Optimizing the GIF with ImageMagick...\n")
proc1.stdin.close()
proc1.wait()
if program == 'ImageMagick':
proc2.wait()
if opt:
proc3.wait()
verbose_print(verbose, "[MoviePy] >>>> File %s is ready !"%filename)
def write_gif_with_image_io(clip, filename, fps=None, opt='wu', loop=0,
colors=None, verbose=True):
"""
Writes the gif with the Python library ImageIO (calls FreeImage).
For the moment ImageIO is not installed with MoviePy. You need to install
imageio (pip install imageio) to use this.
Parameters
-----------
    opt
      Quantizer to use: 'nq' selects NeuQuant; any other value (the default
      is 'wu') selects the Wu quantizer.
"""
if colors is None:
colors=256
if not IMAGEIO_FOUND:
raise ImportError("Writing a gif with imageio requires ImageIO installed,"
" with e.g. 'pip install imageio'")
if fps is None:
fps = clip.fps
quantizer = 'wu' if opt!= 'nq' else 'nq'
writer = imageio.save(filename, duration=1.0/fps,
quantizer=quantizer, palettesize=colors)
verbose_print(verbose, "\n[MoviePy] Building file %s with imageio\n"%filename)
for frame in clip.iter_frames(fps=fps, progress_bar=True, dtype='uint8'):
writer.append_data(frame)
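# Illustrative usage sketch (not part of moviepy); "video.mp4" and the option
# values are placeholders.
def _write_gif_example():
    from moviepy.video.io.VideoFileClip import VideoFileClip
    clip = VideoFileClip("video.mp4").subclip(0, 3)
    write_gif(clip, "out.gif", fps=10, program="ImageMagick")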
| mit |
j5shi/Thruster | plugins/python/lib/PyQt4/__init__.py | 1 | 1424 | # Copyright (c) 2010 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of PyQt.
#
# This file may be used under the terms of the GNU General Public
# License versions 2.0 or 3.0 as published by the Free Software
# Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3
# included in the packaging of this file. Alternatively you may (at
# your option) use any later version of the GNU General Public
# License if such license has been publicly approved by Riverbank
# Computing Limited (or its successors, if any) and the KDE Free Qt
# Foundation. In addition, as a special exception, Riverbank gives you
# certain additional rights. These rights are described in the Riverbank
# GPL Exception version 1.1, which can be found in the file
# GPL_EXCEPTION.txt in this package.
#
# Please review the following information to ensure GNU General
# Public Licensing requirements will be met:
# http://trolltech.com/products/qt/licenses/licensing/opensource/. If
# you are unsure which license is appropriate for your use, please
# review the following information:
# http://trolltech.com/products/qt/licenses/licensing/licensingoverview
# or contact the sales department at sales@riverbankcomputing.com.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
| gpl-2.0 |
mhils/pytest | testing/test_monkeypatch.py | 29 | 7614 | import os, sys
import pytest
from _pytest.monkeypatch import monkeypatch as MonkeyPatch
def pytest_funcarg__mp(request):
cwd = os.getcwd()
sys_path = list(sys.path)
def cleanup():
sys.path[:] = sys_path
os.chdir(cwd)
request.addfinalizer(cleanup)
return MonkeyPatch()
def test_setattr():
class A:
x = 1
monkeypatch = MonkeyPatch()
pytest.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)")
monkeypatch.setattr(A, 'y', 2, raising=False)
assert A.y == 2
monkeypatch.undo()
assert not hasattr(A, 'y')
monkeypatch = MonkeyPatch()
monkeypatch.setattr(A, 'x', 2)
assert A.x == 2
monkeypatch.setattr(A, 'x', 3)
assert A.x == 3
monkeypatch.undo()
assert A.x == 1
A.x = 5
monkeypatch.undo() # double-undo makes no modification
assert A.x == 5
class TestSetattrWithImportPath:
def test_string_expression(self, monkeypatch):
monkeypatch.setattr("os.path.abspath", lambda x: "hello2")
assert os.path.abspath("123") == "hello2"
def test_string_expression_class(self, monkeypatch):
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42
def test_unicode_string(self, monkeypatch):
monkeypatch.setattr("_pytest.config.Config", 42)
import _pytest
assert _pytest.config.Config == 42
monkeypatch.delattr("_pytest.config.Config")
def test_wrong_target(self, monkeypatch):
pytest.raises(TypeError, lambda: monkeypatch.setattr(None, None))
def test_unknown_import(self, monkeypatch):
pytest.raises(pytest.fail.Exception,
lambda: monkeypatch.setattr("unkn123.classx", None))
def test_unknown_attr(self, monkeypatch):
pytest.raises(pytest.fail.Exception,
lambda: monkeypatch.setattr("os.path.qweqwe", None))
def test_unknown_attr_non_raising(self, monkeypatch):
# https://github.com/pytest-dev/pytest/issues/746
monkeypatch.setattr('os.path.qweqwe', 42, raising=False)
assert os.path.qweqwe == 42
def test_delattr(self, monkeypatch):
monkeypatch.delattr("os.path.abspath")
assert not hasattr(os.path, "abspath")
monkeypatch.undo()
assert os.path.abspath
def test_delattr():
class A:
x = 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, 'x')
assert not hasattr(A, 'x')
monkeypatch.undo()
assert A.x == 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, 'x')
pytest.raises(AttributeError, "monkeypatch.delattr(A, 'y')")
monkeypatch.delattr(A, 'y', raising=False)
monkeypatch.setattr(A, 'x', 5, raising=False)
assert A.x == 5
monkeypatch.undo()
assert A.x == 1
def test_setitem():
d = {'x': 1}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, 'x', 2)
monkeypatch.setitem(d, 'y', 1700)
monkeypatch.setitem(d, 'y', 1700)
assert d['x'] == 2
assert d['y'] == 1700
monkeypatch.setitem(d, 'x', 3)
assert d['x'] == 3
monkeypatch.undo()
assert d['x'] == 1
assert 'y' not in d
d['x'] = 5
monkeypatch.undo()
assert d['x'] == 5
def test_setitem_deleted_meanwhile():
d = {}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, 'x', 2)
del d['x']
monkeypatch.undo()
assert not d
@pytest.mark.parametrize("before", [True, False])
def test_setenv_deleted_meanwhile(before):
key = "qwpeoip123"
if before:
os.environ[key] = "world"
monkeypatch = MonkeyPatch()
monkeypatch.setenv(key, 'hello')
del os.environ[key]
monkeypatch.undo()
if before:
assert os.environ[key] == "world"
del os.environ[key]
else:
assert key not in os.environ
def test_delitem():
d = {'x': 1}
monkeypatch = MonkeyPatch()
monkeypatch.delitem(d, 'x')
assert 'x' not in d
monkeypatch.delitem(d, 'y', raising=False)
pytest.raises(KeyError, "monkeypatch.delitem(d, 'y')")
assert not d
monkeypatch.setitem(d, 'y', 1700)
assert d['y'] == 1700
d['hello'] = 'world'
monkeypatch.setitem(d, 'x', 1500)
assert d['x'] == 1500
monkeypatch.undo()
assert d == {'hello': 'world', 'x': 1}
def test_setenv():
monkeypatch = MonkeyPatch()
monkeypatch.setenv('XYZ123', 2)
import os
assert os.environ['XYZ123'] == "2"
monkeypatch.undo()
assert 'XYZ123' not in os.environ
def test_delenv():
name = 'xyz1234'
assert name not in os.environ
monkeypatch = MonkeyPatch()
pytest.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name)
monkeypatch.delenv(name, raising=False)
monkeypatch.undo()
os.environ[name] = "1"
try:
monkeypatch = MonkeyPatch()
monkeypatch.delenv(name)
assert name not in os.environ
monkeypatch.setenv(name, "3")
assert os.environ[name] == "3"
monkeypatch.undo()
assert os.environ[name] == "1"
finally:
if name in os.environ:
del os.environ[name]
def test_setenv_prepend():
import os
monkeypatch = MonkeyPatch()
monkeypatch.setenv('XYZ123', 2, prepend="-")
assert os.environ['XYZ123'] == "2"
monkeypatch.setenv('XYZ123', 3, prepend="-")
assert os.environ['XYZ123'] == "3-2"
monkeypatch.undo()
assert 'XYZ123' not in os.environ
def test_monkeypatch_plugin(testdir):
reprec = testdir.inline_runsource("""
def test_method(monkeypatch):
assert monkeypatch.__class__.__name__ == "monkeypatch"
""")
res = reprec.countoutcomes()
assert tuple(res) == (1, 0, 0), res
def test_syspath_prepend(mp):
old = list(sys.path)
mp.syspath_prepend('world')
mp.syspath_prepend('hello')
assert sys.path[0] == "hello"
assert sys.path[1] == "world"
mp.undo()
assert sys.path == old
mp.undo()
assert sys.path == old
def test_syspath_prepend_double_undo(mp):
mp.syspath_prepend('hello world')
mp.undo()
sys.path.append('more hello world')
mp.undo()
assert sys.path[-1] == 'more hello world'
def test_chdir_with_path_local(mp, tmpdir):
mp.chdir(tmpdir)
assert os.getcwd() == tmpdir.strpath
def test_chdir_with_str(mp, tmpdir):
mp.chdir(tmpdir.strpath)
assert os.getcwd() == tmpdir.strpath
def test_chdir_undo(mp, tmpdir):
cwd = os.getcwd()
mp.chdir(tmpdir)
mp.undo()
assert os.getcwd() == cwd
def test_chdir_double_undo(mp, tmpdir):
mp.chdir(tmpdir.strpath)
mp.undo()
tmpdir.chdir()
mp.undo()
assert os.getcwd() == tmpdir.strpath
def test_issue185_time_breaks(testdir):
testdir.makepyfile("""
import time
def test_m(monkeypatch):
def f():
raise Exception
monkeypatch.setattr(time, "time", f)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*1 passed*
""")
class SampleNew(object):
@staticmethod
def hello():
return True
class SampleNewInherit(SampleNew):
pass
class SampleOld:
#oldstyle on python2
@staticmethod
def hello():
return True
class SampleOldInherit(SampleOld):
pass
@pytest.mark.parametrize('Sample', [
SampleNew, SampleNewInherit,
SampleOld, SampleOldInherit,
], ids=['new', 'new-inherit', 'old', 'old-inherit'])
def test_issue156_undo_staticmethod(Sample):
monkeypatch = MonkeyPatch()
monkeypatch.setattr(Sample, 'hello', None)
assert Sample.hello is None
monkeypatch.undo()
assert Sample.hello()
| mit |
MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Doc/includes/tzinfo-examples.py | 32 | 5063 | from datetime import tzinfo, timedelta, datetime
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
# A UTC class.
class UTC(tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
# A class building tzinfo objects for fixed-offset time zones.
# Note that FixedOffset(0, "UTC") is a different way to build a
# UTC tzinfo object.
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes = offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
# A class capturing the platform's idea of local time.
import time as _time
STDOFFSET = timedelta(seconds = -_time.timezone)
if _time.daylight:
DSTOFFSET = timedelta(seconds = -_time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
class LocalTimezone(tzinfo):
def utcoffset(self, dt):
if self._isdst(dt):
return DSTOFFSET
else:
return STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
Local = LocalTimezone()
# A complete implementation of current DST rules for major US time zones.
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
# US DST Rules
#
# This is a simplified (i.e., wrong for a few cases) set of rules for US
# DST start and end times. For a complete and up-to-date set of DST rules
# and timezone definitions, visit the Olson Database (or try pytz):
# http://www.twinsun.com/tz/tz-link.htm
# http://sourceforge.net/projects/pytz/ (might not be up-to-date)
#
# In the US, since 2007, DST starts at 2am (standard time) on the second
# Sunday in March, which is the first Sunday on or after Mar 8.
DSTSTART_2007 = datetime(1, 3, 8, 2)
# and ends at 2am (DST time; 1am standard time) on the first Sunday of Nov.
DSTEND_2007 = datetime(1, 11, 1, 1)
# From 1987 to 2006, DST used to start at 2am (standard time) on the first
# Sunday in April and to end at 2am (DST time; 1am standard time) on the last
# Sunday of October, which is the first Sunday on or after Oct 25.
DSTSTART_1987_2006 = datetime(1, 4, 1, 2)
DSTEND_1987_2006 = datetime(1, 10, 25, 1)
# From 1967 to 1986, DST used to start at 2am (standard time) on the last
# Sunday in April (the one on or after April 24) and to end at 2am (DST time;
# 1am standard time) on the last Sunday of October, which is the first Sunday
# on or after Oct 25.
DSTSTART_1967_1986 = datetime(1, 4, 24, 2)
DSTEND_1967_1986 = DSTEND_1987_2006
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception may be sensible here, in one or both cases.
# It depends on how you want to treat them. The default
# fromutc() implementation (called by the default astimezone()
# implementation) passes a datetime with dt.tzinfo is self.
return ZERO
assert dt.tzinfo is self
# Find start and end times for US DST. For years before 1967, return
# ZERO for no DST.
if 2006 < dt.year:
dststart, dstend = DSTSTART_2007, DSTEND_2007
elif 1986 < dt.year < 2007:
dststart, dstend = DSTSTART_1987_2006, DSTEND_1987_2006
elif 1966 < dt.year < 1987:
dststart, dstend = DSTSTART_1967_1986, DSTEND_1967_1986
else:
return ZERO
start = first_sunday_on_or_after(dststart.replace(year=dt.year))
end = first_sunday_on_or_after(dstend.replace(year=dt.year))
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
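# Example usage (illustrative, not part of the reference implementations):
def _dst_example():
    utc_dt = datetime(2007, 7, 1, 16, 0, tzinfo=utc)
    eastern_dt = utc_dt.astimezone(Eastern)
    assert eastern_dt.tzname() == 'EDT' and eastern_dt.hour == 12  # UTC-4 in US DST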
| gpl-2.0 |
anryko/ansible | lib/ansible/module_utils/network/exos/argspec/l2_interfaces/l2_interfaces.py | 13 | 1555 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The arg spec for the exos_l2_interfaces module
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class L2_interfacesArgs(object): # pylint: disable=R0903
"""The arg spec for the exos_l2_interfaces module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
'config': {
'elements': 'dict',
'options': {
'access': {'options': {'vlan': {'type': 'int'}},
'type': 'dict'},
'name': {'required': True, 'type': 'str'},
'trunk': {'options': {'native_vlan': {'type': 'int'}, 'trunk_allowed_vlans': {'type': 'list'}},
'type': 'dict'}},
'type': 'list'},
'state': {'choices': ['merged', 'replaced', 'overridden', 'deleted'], 'default': 'merged', 'type': 'str'}
} # pylint: disable=C0301
| gpl-3.0 |
fergalmoran/dss | spa/migrations/0003_auto__add_field_mix_duration.py | 1 | 16192 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Mix.duration'
db.add_column(u'spa_mix', 'duration',
self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Mix.duration'
db.delete_column(u'spa_mix', 'duration')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa._activity': {
'Meta': {'object_name': '_Activity'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'spa._lookup': {
'Meta': {'object_name': '_Lookup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.chatmessage': {
'Meta': {'object_name': 'ChatMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'chat_messages'", 'null': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['spa.Mix']"}),
'time_index': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'spa.event': {
'Meta': {'object_name': 'Event'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'attendees'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 19, 0, 0)'}),
'event_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 19, 0, 0)'}),
'event_description': ('tinymce.views.HTMLField', [], {}),
'event_recurrence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Recurrence']"}),
'event_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.datetime(2013, 4, 19, 0, 0)'}),
'event_title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'event_venue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Venue']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.genre': {
'Meta': {'object_name': 'Genre'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'spa.label': {
'Meta': {'object_name': 'Label'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa.mix': {
'Meta': {'object_name': 'Mix'},
'description': ('django.db.models.fields.TextField', [], {}),
'download_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['spa.Genre']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'mix_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'stream_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '38', 'blank': 'True'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 4, 19, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"}),
'waveform_generated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'spa.mixdownload': {
'Meta': {'object_name': 'MixDownload', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'downloads'", 'to': "orm['spa.Mix']"})
},
'spa.mixfavourite': {
'Meta': {'object_name': 'MixFavourite', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favourites'", 'to': "orm['spa.Mix']"})
},
'spa.mixlike': {
'Meta': {'object_name': 'MixLike', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'likes'", 'to': "orm['spa.Mix']"})
},
'spa.mixplay': {
'Meta': {'object_name': 'MixPlay', '_ormbases': ['spa._Activity']},
u'_activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'plays'", 'to': "orm['spa.Mix']"})
},
'spa.purchaselink': {
'Meta': {'object_name': 'PurchaseLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_link'", 'to': "orm['spa.Tracklist']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'spa.recurrence': {
'Meta': {'object_name': 'Recurrence', '_ormbases': ['spa._Lookup']},
u'_lookup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Lookup']", 'unique': 'True', 'primary_key': 'True'})
},
'spa.release': {
'Meta': {'object_name': 'Release'},
'embed_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'release_artist': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'release_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 4, 19, 0, 0)'}),
'release_description': ('django.db.models.fields.TextField', [], {}),
'release_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'release_label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Label']"}),
'release_title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"})
},
'spa.releaseaudio': {
'Meta': {'object_name': 'ReleaseAudio'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_audio'", 'null': 'True', 'to': "orm['spa.Release']"})
},
'spa.tracklist': {
'Meta': {'object_name': 'Tracklist'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.SmallIntegerField', [], {}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tracklist'", 'to': "orm['spa.Mix']"}),
'remixer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timeindex': ('django.db.models.fields.TimeField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'spa.userfollows': {
'Meta': {'object_name': 'UserFollows'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_from': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'followers'", 'unique': 'True', 'to': "orm['spa.UserProfile']"}),
'user_to': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'following'", 'unique': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activity_sharing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'activity_sharing_networks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'avatar_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'social'", 'max_length': '15'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '35', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
'spa.venue': {
'Meta': {'object_name': 'Venue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'venue_address': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'venue_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'venue_name': ('django.db.models.fields.CharField', [], {'max_length': '250'})
}
}
complete_apps = ['spa'] | bsd-2-clause |
state-hiu/geonode-announcements | announcements/models.py | 2 | 2159 | from django.db import models
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
# support custom user models in django 1.5+
# https://docs.djangoproject.com/en/1.5/topics/auth/customizing/#substituting-a-custom-user-model
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
else:
User = get_user_model()
class Announcement(models.Model):
"""
A single announcement.
"""
DISMISSAL_NO = 1
DISMISSAL_SESSION = 2
DISMISSAL_PERMANENT = 3
DISMISSAL_CHOICES = [
(DISMISSAL_NO, _("No Dismissals Allowed")),
(DISMISSAL_SESSION, _("Session Only Dismissal")),
(DISMISSAL_PERMANENT, _("Permanent Dismissal Allowed"))
]
title = models.CharField(_("title"), max_length=50)
content = models.TextField(_("content"))
creator = models.ForeignKey(User, verbose_name=_("creator"))
creation_date = models.DateTimeField(_("creation_date"), default=timezone.now)
site_wide = models.BooleanField(_("site wide"), default=False)
members_only = models.BooleanField(_("members only"), default=False)
dismissal_type = models.IntegerField(choices=DISMISSAL_CHOICES, default=DISMISSAL_SESSION)
publish_start = models.DateTimeField(_("publish_start"), default=timezone.now)
publish_end = models.DateTimeField(_("publish_end"), blank=True, null=True)
def get_absolute_url(self):
return reverse("announcements_detail", args=[self.pk])
def dismiss_url(self):
if self.dismissal_type != Announcement.DISMISSAL_NO:
return reverse("announcements_dismiss", args=[self.pk])
def __unicode__(self):
return self.title
class Meta:
verbose_name = _("announcement")
verbose_name_plural = _("announcements")
class Dismissal(models.Model):
user = models.ForeignKey(User, related_name="announcement_dismissals")
announcement = models.ForeignKey(Announcement, related_name="dismissals")
dismissed_at = models.DateTimeField(default=timezone.now)
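# Illustrative sketch (not part of the upstream app): one plausible way to
# query the announcements that should currently be shown site-wide, based on
# the field definitions above. The helper name is hypothetical.
def current_site_wide_announcements():
    now = timezone.now()
    # publish_end is nullable, so exclude only rows that have already expired
    return Announcement.objects.filter(
        site_wide=True, publish_start__lte=now
    ).exclude(publish_end__lt=now)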
| mit |
kmoocdev2/edx-platform | lms/djangoapps/courseware/tests/test_context_processor.py | 13 | 1639 | """
Unit tests for courseware context_processor
"""
from django.contrib.auth.models import AnonymousUser
from mock import Mock
from courseware.context_processor import user_timezone_locale_prefs
from openedx.core.djangoapps.user_api.preferences.api import set_user_preference
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class UserPrefContextProcessorUnitTest(ModuleStoreTestCase):
"""
Unit test for courseware context_processor
"""
shard = 4
def setUp(self):
super(UserPrefContextProcessorUnitTest, self).setUp()
self.user = UserFactory.create()
self.request = Mock()
self.request.user = self.user
def test_anonymous_user(self):
self.request.user = AnonymousUser()
context = user_timezone_locale_prefs(self.request)
self.assertIsNone(context['user_timezone'])
self.assertIsNone(context['user_language'])
def test_no_timezone_preference(self):
set_user_preference(self.user, 'pref-lang', 'en')
context = user_timezone_locale_prefs(self.request)
self.assertIsNone(context['user_timezone'])
self.assertIsNotNone(context['user_language'])
self.assertEqual(context['user_language'], 'en')
def test_no_language_preference(self):
set_user_preference(self.user, 'time_zone', 'Asia/Tokyo')
context = user_timezone_locale_prefs(self.request)
self.assertIsNone(context['user_language'])
self.assertIsNotNone(context['user_timezone'])
self.assertEqual(context['user_timezone'], 'Asia/Tokyo')
| agpl-3.0 |
jimporter/mkdocs | mkdocs/structure/files.py | 1 | 10501 | import fnmatch
import os
import logging
from functools import cmp_to_key
from urllib.parse import quote as urlquote
from mkdocs import utils
log = logging.getLogger(__name__)
log.addFilter(utils.warning_filter)
class Files:
""" A collection of File objects. """
def __init__(self, files):
self._files = files
self.src_paths = {file.src_path: file for file in files}
def __iter__(self):
return iter(self._files)
def __len__(self):
return len(self._files)
def __contains__(self, path):
return path in self.src_paths
def get_file_from_path(self, path):
""" Return a File instance with File.src_path equal to path. """
return self.src_paths.get(os.path.normpath(path))
def append(self, file):
""" Append file to Files collection. """
self._files.append(file)
self.src_paths[file.src_path] = file
def copy_static_files(self, dirty=False):
""" Copy static files from source to destination. """
for file in self:
if not file.is_documentation_page():
file.copy_file(dirty)
def documentation_pages(self):
""" Return iterable of all Markdown page file objects. """
return [file for file in self if file.is_documentation_page()]
def static_pages(self):
""" Return iterable of all static page file objects. """
return [file for file in self if file.is_static_page()]
def media_files(self):
""" Return iterable of all file objects which are not documentation or static pages. """
return [file for file in self if file.is_media_file()]
def javascript_files(self):
""" Return iterable of all javascript file objects. """
return [file for file in self if file.is_javascript()]
def css_files(self):
""" Return iterable of all CSS file objects. """
return [file for file in self if file.is_css()]
def add_files_from_theme(self, env, config):
""" Retrieve static files from Jinja environment and add to collection. """
def filter(name):
# '.*' filters dot files/dirs at root level whereas '*/.*' filters nested levels
patterns = ['.*', '*/.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']
patterns.extend('*{}'.format(x) for x in utils.markdown_extensions)
patterns.extend(config['theme'].static_templates)
for pattern in patterns:
if fnmatch.fnmatch(name.lower(), pattern):
return False
return True
for path in env.list_templates(filter_func=filter):
# Theme files do not override docs_dir files
path = os.path.normpath(path)
if path not in self:
for dir in config['theme'].dirs:
# Find the first theme dir which contains path
if os.path.isfile(os.path.join(dir, path)):
self.append(File(path, dir, config['site_dir'], config['use_directory_urls']))
break
class File:
"""
A MkDocs File object.
Points to the source and destination locations of a file.
The `path` argument must be a path that exists relative to `src_dir`.
The `src_dir` and `dest_dir` must be absolute paths on the local file system.
The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is
mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is
mapped to an HTML index file (`index.html`) nested in a directory using the "name" of the file in `path`. The
`use_directory_urls` argument has no effect on non-Markdown files.
File objects have the following properties, which are Unicode strings:
File.src_path
The pure path of the source file relative to the source directory.
File.abs_src_path
The absolute concrete path of the source file.
File.dest_path
The pure path of the destination file relative to the destination directory.
File.abs_dest_path
The absolute concrete path of the destination file.
File.url
The url of the destination file relative to the destination directory as a string.
"""
def __init__(self, path, src_dir, dest_dir, use_directory_urls):
self.page = None
self.src_path = os.path.normpath(path)
self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path))
self.name = self._get_stem()
self.dest_path = self._get_dest_path(use_directory_urls)
self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path))
self.url = self._get_url(use_directory_urls)
def __eq__(self, other):
def sub_dict(d):
return {key: value for key, value in d.items() if key in ['src_path', 'abs_src_path', 'url']}
return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))
def __ne__(self, other):
return not self.__eq__(other)
def _get_stem(self):
""" Return the name of the file without it's extension. """
filename = os.path.basename(self.src_path)
stem, ext = os.path.splitext(filename)
return 'index' if stem in ('index', 'README') else stem
def _get_dest_path(self, use_directory_urls):
""" Return destination path based on source path. """
if self.is_documentation_page():
if use_directory_urls:
parent, filename = os.path.split(self.src_path)
if self.name == 'index':
# index.md or README.md => index.html
return os.path.join(parent, 'index.html')
else:
# foo.md => foo/index.html
return os.path.join(parent, self.name, 'index.html')
else:
# foo.md => foo.html
root, ext = os.path.splitext(self.src_path)
return root + '.html'
return self.src_path
def _get_url(self, use_directory_urls):
""" Return url based in destination path. """
url = self.dest_path.replace(os.path.sep, '/')
dirname, filename = os.path.split(url)
if use_directory_urls and filename == 'index.html':
if dirname == '':
url = '.'
else:
url = dirname + '/'
return urlquote(url)
def url_relative_to(self, other):
""" Return url for file relative to other file. """
return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other)
def copy_file(self, dirty=False):
""" Copy source file to destination, ensuring parent directories exist. """
if dirty and not self.is_modified():
log.debug("Skip copying unmodified file: '{}'".format(self.src_path))
else:
log.debug("Copying media file: '{}'".format(self.src_path))
utils.copy_file(self.abs_src_path, self.abs_dest_path)
def is_modified(self):
if os.path.isfile(self.abs_dest_path):
return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path)
return True
def is_documentation_page(self):
""" Return True if file is a Markdown page. """
return os.path.splitext(self.src_path)[1] in utils.markdown_extensions
def is_static_page(self):
""" Return True if file is a static page (html, xml, json). """
return os.path.splitext(self.src_path)[1] in (
'.html',
'.htm',
'.xml',
'.json',
)
def is_media_file(self):
""" Return True if file is not a documentation or static page. """
return not (self.is_documentation_page() or self.is_static_page())
def is_javascript(self):
""" Return True if file is a JavaScript file. """
return os.path.splitext(self.src_path)[1] in (
'.js',
'.javascript',
)
def is_css(self):
""" Return True if file is a CSS file. """
return os.path.splitext(self.src_path)[1] in (
'.css',
)
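# Illustrative sketch of the mapping described in the File docstring. The
# directories are hypothetical and POSIX path separators are assumed:
#
# >>> f = File('about/contact.md', '/docs', '/site', use_directory_urls=True)
# >>> f.dest_path
# 'about/contact/index.html'
# >>> f.url
# 'about/contact/'
# >>> f = File('about/contact.md', '/docs', '/site', use_directory_urls=False)
# >>> f.dest_path
# 'about/contact.html'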
def get_files(config):
""" Walk the `docs_dir` and return a Files collection. """
files = []
exclude = ['.*', '/templates']
for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True):
relative_dir = os.path.relpath(source_dir, config['docs_dir'])
for dirname in list(dirnames):
path = os.path.normpath(os.path.join(relative_dir, dirname))
# Skip any excluded directories
if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude):
dirnames.remove(dirname)
dirnames.sort()
for filename in _sort_files(filenames):
path = os.path.normpath(os.path.join(relative_dir, filename))
# Skip any excluded files
if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):
continue
# Skip README.md if an index file also exists in dir
if filename.lower() == 'readme.md' and 'index.md' in filenames:
log.warning("Both index.md and readme.md found. Skipping readme.md from {}".format(source_dir))
continue
files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))
return Files(files)
def _sort_files(filenames):
""" Always sort `index` or `README` as first filename in list. """
def compare(x, y):
if x == y:
return 0
if os.path.splitext(y)[0] in ['index', 'README']:
return 1
if os.path.splitext(x)[0] in ['index', 'README'] or x < y:
return -1
return 1
return sorted(filenames, key=cmp_to_key(compare))
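# For example (filenames illustrative):
# _sort_files(['setup.md', 'index.md', 'about.md']) -> ['index.md', 'about.md', 'setup.md']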
def _filter_paths(basename, path, is_dir, exclude):
""" .gitignore style file filtering. """
for item in exclude:
# Items ending in '/' apply only to directories.
if item.endswith('/') and not is_dir:
continue
# Items starting with '/' apply to the whole path.
# In any other cases just the basename is used.
match = path if item.startswith('/') else basename
if fnmatch.fnmatch(match, item.strip('/')):
return True
return False
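# For example, with the default exclude list ['.*', '/templates'] used by
# get_files above:
# _filter_paths('.git', '.git', True, ['.*', '/templates'])           -> True
# _filter_paths('templates', 'templates', True, ['.*', '/templates']) -> True
# _filter_paths('index.md', 'index.md', False, ['.*', '/templates'])  -> False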
| bsd-2-clause |
thaim/ansible | lib/ansible/plugins/terminal/vyos.py | 191 | 1700 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\@[\w\-\.]+:\S+?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"\n\s*Invalid command:"),
re.compile(br"\nCommit failed"),
re.compile(br"\n\s+Set failed"),
]
    terminal_length = int(os.getenv('ANSIBLE_VYOS_TERMINAL_LENGTH', 10000))
def on_open_shell(self):
try:
for cmd in (b'set terminal length 0', b'set terminal width 512'):
self._exec_cli_command(cmd)
self._exec_cli_command(b'set terminal length %d' % self.terminal_length)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
| mit |
N6UDP/cslbot | cslbot/commands/stopwatch.py | 2 | 4831 | # Copyright (C) 2013-2015 Samuel Damashek, Peter Foley, James Forcier, Srijay Kasturi, Reed Koser, Christopher Reffett, and Fox Wilson
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from time import time
from datetime import timedelta
from ..helpers.orm import Stopwatches
from ..helpers import arguments
from ..helpers.command import Command
def create_stopwatch(args):
row = Stopwatches(time=time())
args.session.add(row)
args.session.flush()
return "Created new stopwatch with ID %d" % row.id
def get_elapsed(session, sw):
stopwatch = session.query(Stopwatches).get(sw)
if stopwatch is None:
return "No stopwatch exists with that ID!"
    etime = stopwatch.elapsed
    if stopwatch.active == 1:
        # include the time accrued since the watch was (re)started
        etime += time() - stopwatch.time
return str(timedelta(seconds=etime))
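# Worked example of the formatting above (values illustrative): a stopwatch
# with 3723 elapsed seconds renders as str(timedelta(seconds=3723)) -> '1:02:03'.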
def stop_stopwatch(args):
stopwatch = args.session.query(Stopwatches).get(args.id)
if stopwatch is None:
return "No stopwatch exists with that ID!"
if stopwatch.active == 0:
return "That stopwatch is already stopped!"
    # accumulate any previously stored elapsed time with the current run
    etime = stopwatch.elapsed + (time() - stopwatch.time)
    stopwatch.elapsed = etime
stopwatch.active = 0
return "Stopwatch stopped at %s" % get_elapsed(args.session, args.id)
def delete_stopwatch(args):
if not args.isadmin:
return "Nope, not gonna do it!"
stopwatch = args.session.query(Stopwatches).get(args.id)
if stopwatch is None:
return "No stopwatch exists with that ID!"
if stopwatch.active == 1:
return "That stopwatch is currently running!"
args.session.delete(stopwatch)
return "Stopwatch deleted!"
def resume_stopwatch(args):
stopwatch = args.session.query(Stopwatches).get(args.id)
if stopwatch is None:
return "No stopwatch exists with that ID!"
if stopwatch.active == 1:
return "That stopwatch is not paused!"
stopwatch.active = 1
stopwatch.time = time()
return "Stopwatch resumed!"
def list_stopwatch(args):
active = args.session.query(Stopwatches).filter(Stopwatches.active == 1).order_by(Stopwatches.id).all()
paused = args.session.query(Stopwatches).filter(Stopwatches.active == 0).order_by(Stopwatches.id).all()
for x in active:
args.send('Active stopwatch #%d started at %d' % (x.id, x.time), target=args.nick)
for x in paused:
args.send('Paused stopwatch #%d started at %d time elapsed %d' % (x.id, x.time, x.elapsed), target=args.nick)
return "%d active and %d paused stopwatches." % (len(active), len(paused))
def get_stopwatch(args):
stopwatch = args.session.query(Stopwatches).get(args.id)
if stopwatch is None:
return "Invalid ID!"
status = "Active" if stopwatch.active == 1 else "Paused"
return "%s %s" % (status, get_elapsed(args.session, args.id))
@Command(['stopwatch', 'sw'], ['config', 'db', 'is_admin', 'nick'])
def cmd(send, msg, args):
"""Start/stops/resume/get stopwatch
Syntax: {command} <start|stop|resume|delete|get|list>
"""
parser = arguments.ArgParser(args['config'])
parser.set_defaults(session=args['db'])
subparser = parser.add_subparsers()
start_parser = subparser.add_parser('start')
start_parser.set_defaults(func=create_stopwatch)
stop_parser = subparser.add_parser('stop')
stop_parser.add_argument('id', type=int)
stop_parser.set_defaults(func=stop_stopwatch)
resume_parser = subparser.add_parser('resume')
resume_parser.add_argument('id', type=int)
resume_parser.set_defaults(func=resume_stopwatch)
delete_parser = subparser.add_parser('delete')
delete_parser.add_argument('id', type=int)
delete_parser.set_defaults(func=delete_stopwatch, isadmin=args['is_admin'](args['nick']))
get_parser = subparser.add_parser('get')
get_parser.add_argument('id', type=int)
get_parser.set_defaults(func=get_stopwatch)
list_parser = subparser.add_parser('list')
list_parser.set_defaults(func=list_stopwatch, nick=args['nick'], send=send)
try:
cmdargs = parser.parse_args(msg)
except arguments.ArgumentException as e:
send(str(e))
return
send(cmdargs.func(cmdargs))
| gpl-2.0 |
Eureka22/ASM_xf | PythonD/lib/python2.4/idlelib/configSectionNameDialog.py | 150 | 3720 | """
Dialog that allows user to specify a new config file section name.
Used to get new highlight theme and keybinding set names.
"""
from Tkinter import *
import tkMessageBox
class GetCfgSectionNameDialog(Toplevel):
def __init__(self,parent,title,message,usedNames):
"""
message - string, informational message to display
usedNames - list, list of names already in use for validity check
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.resizable(height=FALSE,width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
self.message=message
self.usedNames=usedNames
self.result=''
self.CreateWidgets()
self.withdraw() #hide while setting geometry
self.update_idletasks()
#needs to be done here so that the winfo_reqwidth is valid
self.messageInfo.config(width=self.frameMain.winfo_reqwidth())
self.geometry("+%d+%d" %
((parent.winfo_rootx()+((parent.winfo_width()/2)
-(self.winfo_reqwidth()/2)),
parent.winfo_rooty()+((parent.winfo_height()/2)
-(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
self.deiconify() #geometry set, unhide
self.wait_window()
def CreateWidgets(self):
self.name=StringVar(self)
self.fontSize=StringVar(self)
self.frameMain = Frame(self,borderwidth=2,relief=SUNKEN)
self.frameMain.pack(side=TOP,expand=TRUE,fill=BOTH)
self.messageInfo=Message(self.frameMain,anchor=W,justify=LEFT,padx=5,pady=5,
text=self.message)#,aspect=200)
entryName=Entry(self.frameMain,textvariable=self.name,width=30)
entryName.focus_set()
self.messageInfo.pack(padx=5,pady=5)#,expand=TRUE,fill=BOTH)
entryName.pack(padx=5,pady=5)
frameButtons=Frame(self)
frameButtons.pack(side=BOTTOM,fill=X)
self.buttonOk = Button(frameButtons,text='Ok',
width=8,command=self.Ok)
self.buttonOk.grid(row=0,column=0,padx=5,pady=5)
self.buttonCancel = Button(frameButtons,text='Cancel',
width=8,command=self.Cancel)
self.buttonCancel.grid(row=0,column=1,padx=5,pady=5)
def NameOk(self):
#simple validity check for a sensible
#ConfigParser file section name
nameOk=1
name=self.name.get()
name.strip()
if not name: #no name specified
tkMessageBox.showerror(title='Name Error',
message='No name specified.', parent=self)
nameOk=0
elif len(name)>30: #name too long
tkMessageBox.showerror(title='Name Error',
message='Name too long. It should be no more than '+
'30 characters.', parent=self)
nameOk=0
elif name in self.usedNames:
tkMessageBox.showerror(title='Name Error',
message='This name is already in use.', parent=self)
nameOk=0
return nameOk
def Ok(self, event=None):
if self.NameOk():
self.result=self.name.get().strip()
self.destroy()
def Cancel(self, event=None):
self.result=''
self.destroy()
if __name__ == '__main__':
#test the dialog
root=Tk()
def run():
        dlg=GetCfgSectionNameDialog(root,'Get Name',
                'The information here should need to be word wrapped. Test.',[])
print dlg.result
Button(root,text='Dialog',command=run).pack()
root.mainloop()
| gpl-2.0 |
rrrene/django | django/templatetags/cache.py | 471 | 3389 | from __future__ import unicode_literals
from django.core.cache import InvalidCacheBackendError, caches
from django.core.cache.utils import make_template_fragment_key
from django.template import (
Library, Node, TemplateSyntaxError, VariableDoesNotExist,
)
register = Library()
class CacheNode(Node):
def __init__(self, nodelist, expire_time_var, fragment_name, vary_on, cache_name):
self.nodelist = nodelist
self.expire_time_var = expire_time_var
self.fragment_name = fragment_name
self.vary_on = vary_on
self.cache_name = cache_name
def render(self, context):
try:
expire_time = self.expire_time_var.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.expire_time_var.var)
try:
expire_time = int(expire_time)
except (ValueError, TypeError):
raise TemplateSyntaxError('"cache" tag got a non-integer timeout value: %r' % expire_time)
if self.cache_name:
try:
cache_name = self.cache_name.resolve(context)
except VariableDoesNotExist:
raise TemplateSyntaxError('"cache" tag got an unknown variable: %r' % self.cache_name.var)
try:
fragment_cache = caches[cache_name]
except InvalidCacheBackendError:
raise TemplateSyntaxError('Invalid cache name specified for cache tag: %r' % cache_name)
else:
try:
fragment_cache = caches['template_fragments']
except InvalidCacheBackendError:
fragment_cache = caches['default']
vary_on = [var.resolve(context) for var in self.vary_on]
cache_key = make_template_fragment_key(self.fragment_name, vary_on)
value = fragment_cache.get(cache_key)
if value is None:
value = self.nodelist.render(context)
fragment_cache.set(cache_key, value, expire_time)
return value
@register.tag('cache')
def do_cache(parser, token):
"""
This will cache the contents of a template fragment for a given amount
of time.
Usage::
{% load cache %}
{% cache [expire_time] [fragment_name] %}
.. some expensive processing ..
{% endcache %}
This tag also supports varying by a list of arguments::
{% load cache %}
{% cache [expire_time] [fragment_name] [var1] [var2] .. %}
.. some expensive processing ..
{% endcache %}
Optionally the cache to use may be specified thus::
{% cache .... using="cachename" %}
Each unique set of arguments will result in a unique cache entry.
"""
nodelist = parser.parse(('endcache',))
parser.delete_first_token()
tokens = token.split_contents()
if len(tokens) < 3:
raise TemplateSyntaxError("'%r' tag requires at least 2 arguments." % tokens[0])
if len(tokens) > 3 and tokens[-1].startswith('using='):
cache_name = parser.compile_filter(tokens[-1][len('using='):])
tokens = tokens[:-1]
else:
cache_name = None
return CacheNode(nodelist,
parser.compile_filter(tokens[1]),
tokens[2], # fragment_name can't be a variable.
[parser.compile_filter(t) for t in tokens[3:]],
cache_name,
)
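# Illustrative sketch (not part of this module): the key computed inside
# CacheNode.render can be reproduced to invalidate a cached fragment from
# Python code. 'sidebar' and `username` are hypothetical values.
#
# >>> from django.core.cache import cache
# >>> from django.core.cache.utils import make_template_fragment_key
# >>> key = make_template_fragment_key('sidebar', [username])
# >>> cache.delete(key)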
| bsd-3-clause |
django-cratis/cratis | tests/test_init.py | 1 | 1147 | import os
from subprocess import check_output
from cratis.bootstrap import init_app, run_env_command
from tests._markers import slow
@slow
def test_init_in_empty_dir(tmpdir):
"""
    Initialising a new app in an empty directory should create settings.py
    and a virtual environment, and the generated project should pass
    Django's system checks.
:param tmpdir:
:return:
"""
with tmpdir.as_cwd():
init_app(cratis_path=os.path.dirname(os.path.dirname(__file__)))
assert tmpdir.join('settings.py').exists()
assert tmpdir.join('.pyvenv').exists()
out = check_output(['.pyvenv/bin/django-manage', 'check'])
# make sure application is loading
assert b'System check identified no issues' in out
@slow
def test_init_with_settings_and_repo_package(tmpdir):
with tmpdir.as_cwd():
with tmpdir.join('settings.py').open('w') as f:
f.write(
"""
from cratis.settings import CratisConfig
from features import HelloFeature
class Dev(CratisConfig):
# boo
DEBUG = True
SECRET_KEY = '123'
""")
init_app()
# make sure file is not overriden
assert '# boo' in tmpdir.join('settings.py').read()
| bsd-2-clause |
vuolter/pyload | src/pyload/core/scheduler.py | 2 | 2825 | # -*- coding: utf-8 -*-
import time
from heapq import heappop, heappush
from threading import Lock
from _thread import start_new_thread
from .utils.struct.lock import lock
class AlreadyCalled(Exception):
pass
class Deferred:
def __init__(self):
self.call = []
self.result = ()
def add_callback(self, f, *cargs, **ckwargs):
self.call.append((f, cargs, ckwargs))
def callback(self, *args, **kwargs):
if self.result:
raise AlreadyCalled
self.result = (args, kwargs)
        for f, cargs, ckwargs in self.call:
            # build per-callback arguments without clobbering the shared result
            call_args = args + tuple(cargs)
            call_kwargs = dict(kwargs, **ckwargs)
            f(*call_args, **call_kwargs)
class Scheduler:
def __init__(self, core):
self.pyload = core
self._ = core._
self.queue = PriorityQueue()
def add_job(self, t, call, args=[], kwargs={}, threaded=True):
d = Deferred()
t += time.time()
j = Job(t, call, args, kwargs, d, threaded)
self.queue.put((t, j))
return d
def remove_job(self, d):
"""
:param d: defered object
:return: if job was deleted
"""
index = -1
for i, j in enumerate(self.queue):
if j[1].deferred == d:
index = i
if index >= 0:
del self.queue[index]
return True
return False
def run(self):
while True:
t, j = self.queue.get()
if not j:
break
else:
if t <= time.time():
j.start()
else:
self.queue.put((t, j))
break
class Job:
def __init__(self, time, call, args=[], kwargs={}, deferred=None, threaded=True):
self.time = float(time)
self.call = call
self.args = args
self.kwargs = kwargs
self.deferred = deferred
self.threaded = threaded
def run(self):
ret = self.call(*self.args, **self.kwargs)
if self.deferred is None:
return
else:
self.deferred.callback(ret)
def start(self):
if self.threaded:
start_new_thread(self.run, ())
else:
self.run()
class PriorityQueue:
"""
a non blocking priority queue.
"""
def __init__(self):
self.queue = []
self.lock = Lock()
def __iter__(self):
return iter(self.queue)
def __delitem__(self, key):
del self.queue[key]
@lock
def put(self, element):
heappush(self.queue, element)
@lock
def get(self):
"""
return element or None.
"""
try:
el = heappop(self.queue)
return el
except IndexError:
return None, None
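# Illustrative usage sketch (hedged; `core` stands in for the pyLoad Core
# object that Scheduler expects):
#
# >>> scheduler = Scheduler(core)
# >>> d = scheduler.add_job(5, print, args=["five seconds later"])
# >>> d.add_callback(lambda result: print("job returned", result))
# >>> scheduler.run()  # normally invoked periodically by the core loop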
| agpl-3.0 |
RAPD/RAPD | src/old_agents/subcontractors/xdsme/new/xdsme-0.4.9/XIO/plugins/pycgtypes/vec4.py | 12 | 12333 | ####################################################################
# vec4 - 4-dimensional vector
#
# Copyright (C) 2002, Matthias Baas (baas@ira.uka.de)
#
# You may distribute under the terms of the BSD license, as
# specified in the file license.txt.
####################################################################
import types, math
# vec4
class vec4:
"""Four-dimensional vector.
This class represents a 4D vector.
"""
def __init__(self, *args):
"""Constructor.
There are several possibilities how to initialize a vector:
v = vec4() -> v = <0,0,0,0>
v = vec4(a) -> v = <a,a,a,a>
v = vec4(x,y) -> v = <x,y,0,0>
v = vec4(x,y,z) -> v = <x,y,z,0>
v = vec4(x,y,z,w) -> v = <x,y,z,w>
Note that specifying just one value sets all four components to
that value.
Additionally you can wrap those values in a list or a tuple or
specify them as a string:
v = vec4([1,2,3]) -> v = <1,2,3,0>
v = vec4("4,5") -> v = <4,5,0,0>
"""
if len(args)==0:
self.x, self.y, self.z, self.w = (0.0, 0.0, 0.0, 0.0)
elif len(args)==1:
T = type(args[0])
# scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
self.x, self.y, self.z, self.w = (args[0], args[0], args[0], args[0])
# vec4
elif isinstance(args[0], vec4):
self.x, self.y, self.z, self.w = args[0]
# Tuple/List
elif T==types.TupleType or T==types.ListType:
if len(args[0])==0:
self.x = self.y = self.z = self.w = 0.0
elif len(args[0])==1:
self.x = self.y = self.z = args[0][0]
self.w = 0.0
elif len(args[0])==2:
self.x, self.y = args[0]
self.z = 0.0
self.w = 0.0
elif len(args[0])==3:
self.x, self.y, self.z = args[0]
self.w = 0.0
elif len(args[0])==4:
self.x, self.y, self.z, self.w = args[0]
else:
raise TypeError, "vec4() takes at most 4 arguments"
# String
elif T==types.StringType:
                s=args[0].replace(","," ").replace("  "," ").strip().split(" ")
if s==[""]:
s=[]
f=map(lambda x: float(x), s)
dummy = vec4(f)
self.x, self.y, self.z, self.w = dummy
# error
else:
raise TypeError,"vec4() arg can't be converted to vec4"
elif len(args)==2:
self.x, self.y = args
self.z, self.w = (0.0, 0.0)
elif len(args)==3:
self.x, self.y, self.z = args
self.w = 0.0
elif len(args)==4:
self.x, self.y, self.z, self.w = args
else:
raise TypeError, "vec4() takes at most 4 arguments"
def __repr__(self):
return 'vec4('+`self.x`+', '+`self.y`+', '+`self.z`+', '+`self.w`+')'
def __str__(self):
fmt="%1.4f"
return '('+fmt%self.x+', '+fmt%self.y+', '+fmt%self.z+', '+fmt%self.w+')'
def __eq__(self, other):
"""== operator
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> b=vec4(-0.3, 0.75, 0.5, 0.6)
>>> c=vec4(-0.3, 0.75, 0.5, 0.6)
>>> print a==b
0
>>> print b==c
1
>>> print a==None
0
"""
if isinstance(other, vec4):
            return self.x==other.x and self.y==other.y and self.z==other.z and self.w==other.w
else:
return 0
def __ne__(self, other):
"""!= operator
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> b=vec4(-0.3, 0.75, 0.5, 0.6)
>>> c=vec4(-0.3, 0.75, 0.5, 0.6)
>>> print a!=b
1
>>> print b!=c
0
>>> print a!=None
1
"""
if isinstance(other, vec4):
            return self.x!=other.x or self.y!=other.y or self.z!=other.z or self.w!=other.w
else:
return 1
def __add__(self, other):
"""Vector addition.
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> b=vec4(-0.3, 0.75, 0.5, 0.3)
>>> print a+b
(0.7000, 1.2500, -1.3000, 0.5000)
"""
if isinstance(other, vec4):
return vec4(self.x+other.x, self.y+other.y, self.z+other.z, self.w+other.w)
else:
raise TypeError, "unsupported operand type for +"
def __sub__(self, other):
"""Vector subtraction.
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> b=vec4(-0.3, 0.75, 0.5, 0.3)
>>> print a-b
(1.3000, -0.2500, -2.3000, -0.1000)
"""
if isinstance(other, vec4):
return vec4(self.x-other.x, self.y-other.y, self.z-other.z, self.w-other.w)
else:
raise TypeError, "unsupported operand type for -"
def __mul__(self, other):
"""Multiplication with a scalar or dot product.
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> b=vec4(-0.3, 0.75, 0.5, 0.3)
>>> print a*2.0
(2.0000, 1.0000, -3.6000, 0.4000)
>>> print 2.0*a
(2.0000, 1.0000, -3.6000, 0.4000)
>>> print a*b
-0.765
"""
T = type(other)
# vec4*scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
return vec4(self.x*other, self.y*other, self.z*other, self.w*other)
# vec4*vec4
if isinstance(other, vec4):
return self.x*other.x + self.y*other.y + self.z*other.z + self.w*other.w
# unsupported
else:
# Try to delegate the operation to the other operand
if getattr(other,"__rmul__",None)!=None:
return other.__rmul__(self)
else:
raise TypeError, "unsupported operand type for *"
__rmul__ = __mul__
def __div__(self, other):
"""Division by scalar
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> print a/2.0
(0.5000, 0.2500, -0.9000, 0.1000)
"""
T = type(other)
# vec4/scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
return vec4(self.x/other, self.y/other, self.z/other, self.w/other)
# unsupported
else:
raise TypeError, "unsupported operand type for /"
def __mod__(self, other):
"""Modulo (component wise)
>>> a=vec4(3.0, 2.5, -1.8, 0.2)
>>> print a%2.0
(1.0000, 0.5000, 0.2000, 0.2000)
"""
T = type(other)
# vec4%scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
return vec4(self.x%other, self.y%other, self.z%other, self.w%other)
# unsupported
else:
raise TypeError, "unsupported operand type for %"
def __iadd__(self, other):
"""Inline vector addition.
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> b=vec4(-0.3, 0.75, 0.5, 0.3)
>>> a+=b
>>> print a
(0.7000, 1.2500, -1.3000, 0.5000)
"""
if isinstance(other, vec4):
self.x+=other.x
self.y+=other.y
self.z+=other.z
self.w+=other.w
return self
else:
raise TypeError, "unsupported operand type for +="
def __isub__(self, other):
"""Inline vector subtraction.
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> b=vec4(-0.3, 0.75, 0.5, 0.3)
>>> a-=b
>>> print a
(1.3000, -0.2500, -2.3000, -0.1000)
"""
if isinstance(other, vec4):
self.x-=other.x
self.y-=other.y
self.z-=other.z
self.w-=other.w
return self
else:
raise TypeError, "unsupported operand type for -="
def __imul__(self, other):
"""Inline multiplication (only with scalar)
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> a*=2.0
>>> print a
(2.0000, 1.0000, -3.6000, 0.4000)
"""
T = type(other)
# vec4*=scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
self.x*=other
self.y*=other
self.z*=other
self.w*=other
return self
else:
raise TypeError, "unsupported operand type for *="
def __idiv__(self, other):
"""Inline division with scalar
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> a/=2.0
>>> print a
(0.5000, 0.2500, -0.9000, 0.1000)
"""
T = type(other)
# vec4/=scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
self.x/=other
self.y/=other
self.z/=other
self.w/=other
return self
else:
raise TypeError, "unsupported operand type for /="
def __imod__(self, other):
"""Inline modulo
>>> a=vec4(3.0, 2.5, -1.8, 0.2)
>>> a%=2.0
>>> print a
(1.0000, 0.5000, 0.2000, 0.2000)
"""
T = type(other)
# vec4%=scalar
if T==types.FloatType or T==types.IntType or T==types.LongType:
self.x%=other
self.y%=other
self.z%=other
self.w%=other
return self
else:
raise TypeError, "unsupported operand type for %="
def __neg__(self):
"""Negation
>>> a=vec4(3.0, 2.5, -1.8, 0.2)
>>> print -a
(-3.0000, -2.5000, 1.8000, -0.2000)
"""
return vec4(-self.x, -self.y, -self.z, -self.w)
def __pos__(self):
"""
>>> a=vec4(3.0, 2.5, -1.8, 0.2)
>>> print +a
(3.0000, 2.5000, -1.8000, 0.2000)
"""
return vec4(+self.x, +self.y, +self.z, +self.w)
def __abs__(self):
"""Return the length of the vector.
abs(v) is equivalent to v.length().
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> print abs(a)
2.12837966538
"""
return math.sqrt(self*self)
def __len__(self):
"""Length of the sequence (always 4)"""
return 4
def __getitem__(self, key):
"""Return a component by index (0-based)
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> print a[0]
1.0
>>> print a[1]
0.5
>>> print a[2]
-1.8
>>> print a[3]
0.2
"""
T=type(key)
if T!=types.IntType and T!=types.LongType:
raise TypeError, "index must be integer"
if key==0: return self.x
elif key==1: return self.y
elif key==2: return self.z
elif key==3: return self.w
else:
raise IndexError,"index out of range"
def __setitem__(self, key, value):
"""Set a component by index (0-based)
>>> a=vec4()
>>> a[0]=1.5; a[1]=0.7; a[2]=-0.3; a[3]=0.2
>>> print a
(1.5000, 0.7000, -0.3000, 0.2000)
"""
T=type(key)
if T!=types.IntType and T!=types.LongType:
raise TypeError, "index must be integer"
if key==0: self.x = value
elif key==1: self.y = value
elif key==2: self.z = value
elif key==3: self.w = value
else:
raise IndexError,"index out of range"
def length(self):
"""Return the length of the vector.
v.length() is equivalent to abs(v).
>>> a=vec4(1.0, 0.5, -1.8, 0.2)
>>> print a.length()
2.12837966538
"""
return math.sqrt(self*self)
def normalize(self):
"""Return normalized vector.
>>> a=vec4(1.0, 0.5, -1.8, 1.2)
>>> print a.normalize()
(0.4107, 0.2053, -0.7392, 0.4928)
"""
nlen = 1.0/math.sqrt(self*self)
return vec4(self.x*nlen, self.y*nlen, self.z*nlen, self.w*nlen)
######################################################################
def _test():
import doctest, vec4
failed, total = doctest.testmod(vec4)
print "%d/%d failed" % (failed, total)
if __name__=="__main__":
_test()
| agpl-3.0 |
rosswhitfield/mantid | scripts/Engineering/gui/engineering_diffraction/tabs/focus/model.py | 3 | 17365 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import csv
from os import path, makedirs
from matplotlib import gridspec
import matplotlib.pyplot as plt
from Engineering.gui.engineering_diffraction.tabs.common import vanadium_corrections, path_handling
from Engineering.gui.engineering_diffraction.settings.settings_helper import get_setting
from Engineering.EnggUtils import create_custom_grouping_workspace
from mantid.simpleapi import logger, AnalysisDataService as Ads, SaveNexus, SaveGSS, SaveFocusedXYE, \
Load, NormaliseByCurrent, Divide, DiffractionFocussing, RebinToWorkspace, DeleteWorkspace, ApplyDiffCal, \
ConvertUnits, ReplaceSpecialValues
SAMPLE_RUN_WORKSPACE_NAME = "engggui_focusing_input_ws"
FOCUSED_OUTPUT_WORKSPACE_NAME = "engggui_focusing_output_ws_bank_"
CALIB_PARAMS_WORKSPACE_NAME = "engggui_calibration_banks_parameters"
NORTH_BANK_CAL = "EnginX_NorthBank.cal"
SOUTH_BANK_CAL = "EnginX_SouthBank.cal"
class FocusModel(object):
def __init__(self):
self._last_path = None
self._last_path_ws = None
def get_last_path(self):
return self._last_path
def focus_run(self, sample_paths, banks, plot_output, instrument, rb_num, spectrum_numbers, custom_cal):
"""
Focus some data using the current calibration.
:param sample_paths: The paths to the data to be focused.
:param banks: The banks that should be focused.
:param plot_output: True if the output should be plotted.
:param instrument: The instrument that the data came from.
:param rb_num: The experiment number, used to create directories. Can be None
:param spectrum_numbers: The specific spectra that should be focused. Used instead of banks.
:param custom_cal: User defined calibration file to crop the focus to
"""
full_calib_path = get_setting(path_handling.INTERFACES_SETTINGS_GROUP,
path_handling.ENGINEERING_PREFIX, "full_calibration")
if not Ads.doesExist("full_inst_calib"):
try:
full_calib_workspace = Load(full_calib_path, OutputWorkspace="full_inst_calib")
except RuntimeError:
logger.error("Error loading Full instrument calibration - this is set in the interface settings.")
return
else:
full_calib_workspace = Ads.retrieve("full_inst_calib")
if not Ads.doesExist(vanadium_corrections.INTEGRATED_WORKSPACE_NAME) and not Ads.doesExist(
vanadium_corrections.CURVES_WORKSPACE_NAME):
return
integration_workspace = Ads.retrieve(vanadium_corrections.INTEGRATED_WORKSPACE_NAME)
curves_workspace = Ads.retrieve(vanadium_corrections.CURVES_WORKSPACE_NAME)
output_workspaces = [] # List of collated workspaces to plot.
df_kwarg, name, region_calib = None, None, None
if spectrum_numbers:
inst_ws = path_handling.load_workspace(sample_paths[0])
grp_ws = create_custom_grouping_workspace(spectrum_numbers, inst_ws)
df_kwarg = {"GroupingWorkspace": grp_ws}
region_calib = "engggui_calibration_Cropped"
name = 'Cropped'
elif custom_cal:
# TODO this functionality has not yet been fully implemented
df_kwarg = {"GroupingFileName": custom_cal}
region_calib = "engggui_calibration_Custom"
name = 'Custom'
if df_kwarg:
# check correct region calibration exists
if not Ads.doesExist(region_calib):
logger.warning(f"Cannot focus as the region calibration workspace \"{region_calib}\" is not "
f"present.")
return
for sample_path in sample_paths:
sample_workspace = path_handling.load_workspace(sample_path)
run_no = path_handling.get_run_number_from_path(sample_path, instrument)
tof_output_name = str(run_no) + "_" + FOCUSED_OUTPUT_WORKSPACE_NAME + name
dspacing_output_name = tof_output_name + "_dSpacing"
# perform prefocus operations on whole instrument workspace
prefocus_success = self._whole_inst_prefocus(sample_workspace, integration_workspace,
full_calib_workspace)
if not prefocus_success:
continue
# perform focus over chosen region of interest
self._run_focus(sample_workspace, tof_output_name, curves_workspace, df_kwarg, region_calib)
output_workspaces.append([tof_output_name])
self._save_output(instrument, sample_path, "Cropped", tof_output_name, rb_num)
self._save_output(instrument, sample_path, "Cropped", dspacing_output_name, rb_num, unit="dSpacing")
self._output_sample_logs(instrument, run_no, sample_workspace, rb_num)
# remove created grouping workspace if present
if Ads.doesExist("grp_ws"):
DeleteWorkspace("grp_ws")
else:
for sample_path in sample_paths:
sample_workspace = path_handling.load_workspace(sample_path)
run_no = path_handling.get_run_number_from_path(sample_path, instrument)
workspaces_for_run = []
# perform prefocus operations on whole instrument workspace
prefocus_success = self._whole_inst_prefocus(sample_workspace, integration_workspace,
full_calib_workspace)
if not prefocus_success:
continue
# perform focus over chosen banks
for name in banks:
tof_output_name = str(run_no) + "_" + FOCUSED_OUTPUT_WORKSPACE_NAME + str(name)
dspacing_output_name = tof_output_name + "_dSpacing"
if name == '1':
df_kwarg = {"GroupingFileName": NORTH_BANK_CAL}
region_calib = "engggui_calibration_bank_1"
else:
df_kwarg = {"GroupingFileName": SOUTH_BANK_CAL}
region_calib = "engggui_calibration_bank_2"
# check correct region calibration exists
if not Ads.doesExist(region_calib):
logger.warning(f"Cannot focus as the region calibration workspace \"{region_calib}\" is not "
f"present.")
return
self._run_focus(sample_workspace, tof_output_name, curves_workspace, df_kwarg, region_calib)
workspaces_for_run.append(tof_output_name)
# Save the output to the file system.
self._save_output(instrument, sample_path, name, tof_output_name, rb_num)
self._save_output(instrument, sample_path, name, dspacing_output_name, rb_num, unit="dSpacing")
output_workspaces.append(workspaces_for_run)
self._output_sample_logs(instrument, run_no, sample_workspace, rb_num)
DeleteWorkspace(sample_workspace)
# Plot the output
if plot_output:
for ws_names in output_workspaces:
self._plot_focused_workspaces(ws_names)
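    # Illustrative call sketch (hedged): the file path, instrument and banks
    # are hypothetical, and the vanadium and region calibration workspaces
    # referenced above must already exist in the ADS for the run to proceed.
    #
    # >>> model = FocusModel()
    # >>> model.focus_run(["/data/ENGINX00305761.nxs"], banks=["1", "2"],
    # ...                 plot_output=False, instrument="ENGINX", rb_num=None,
    # ...                 spectrum_numbers=None, custom_cal=None)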
@staticmethod
def _whole_inst_prefocus(input_workspace,
vanadium_integration_ws,
full_calib) -> bool:
"""This is used to perform the operations done on the whole instrument workspace, before the chosen region of
interest is focused using _run_focus
:param input_workspace: Raw sample run to process prior to focussing over a region of interest
:param vanadium_integration_ws: Integral of the supplied vanadium run
:param full_calib: Full instrument calibration workspace (table ws output from PDCalibration)
:return True if successful, False if aborted
"""
if input_workspace.getRun().getProtonCharge() > 0:
NormaliseByCurrent(InputWorkspace=input_workspace, OutputWorkspace=input_workspace)
else:
logger.warning(f"Skipping focus of run {input_workspace.name()} because it has invalid proton charge.")
return False
input_workspace /= vanadium_integration_ws
# replace nans created in sensitivity correction
ReplaceSpecialValues(InputWorkspace=input_workspace, OutputWorkspace=input_workspace, NaNValue=0,
InfinityValue=0)
ApplyDiffCal(InstrumentWorkspace=input_workspace, CalibrationWorkspace=full_calib)
ConvertUnits(InputWorkspace=input_workspace, OutputWorkspace=input_workspace, Target='dSpacing')
return True
@staticmethod
def _run_focus(input_workspace,
tof_output_name,
vanadium_curves_ws,
df_kwarg,
region_calib) -> None:
"""Focus the processed full instrument workspace over the chosen region of interest
:param input_workspace: Processed full instrument workspace converted to dSpacing
:param tof_output_name: Name for the time-of-flight output workspace
:param vanadium_curves_ws: Workspace containing the vanadium curves
:param df_kwarg: kwarg to pass to DiffractionFocussing specifying the region of interest
:param region_calib: Region of interest calibration workspace (table ws output from PDCalibration)
"""
# rename workspace prior to focussing to avoid errors later
dspacing_output_name = tof_output_name + "_dSpacing"
# focus over specified region of interest
focused_sample = DiffractionFocussing(InputWorkspace=input_workspace, OutputWorkspace=dspacing_output_name,
**df_kwarg)
curves_rebinned = RebinToWorkspace(WorkspaceToRebin=vanadium_curves_ws, WorkspaceToMatch=focused_sample)
Divide(LHSWorkspace=focused_sample, RHSWorkspace=curves_rebinned, OutputWorkspace=focused_sample,
AllowDifferentNumberSpectra=True)
# apply calibration from specified region of interest
ApplyDiffCal(InstrumentWorkspace=focused_sample, CalibrationWorkspace=region_calib)
# set bankid for use in fit tab
run = focused_sample.getRun()
if region_calib == "engggui_calibration_bank_1":
run.addProperty("bankid", 1, True)
elif region_calib == "engggui_calibration_bank_2":
run.addProperty("bankid", 2, True)
else:
run.addProperty("bankid", 3, True)
# output in both dSpacing and TOF
ConvertUnits(InputWorkspace=focused_sample, OutputWorkspace=tof_output_name, Target='TOF')
DeleteWorkspace(curves_rebinned)
@staticmethod
def _plot_focused_workspaces(focused_workspaces):
fig = plt.figure()
gs = gridspec.GridSpec(1, len(focused_workspaces))
plots = [
fig.add_subplot(gs[i], projection="mantid") for i in range(len(focused_workspaces))
]
for ax, ws_name in zip(plots, focused_workspaces):
ax.plot(Ads.retrieve(ws_name), wkspIndex=0)
ax.set_title(ws_name)
fig.show()
def _save_output(self, instrument, sample_path, bank, sample_workspace, rb_num, unit="TOF"):
"""
Save a focused workspace to the file system. Saves separate copies to a User directory if an rb number has
been set.
:param instrument: The instrument the data is from.
:param sample_path: The path to the data file that was focused.
:param bank: The name of the bank being saved.
:param sample_workspace: The name of the workspace to be saved.
:param rb_num: Usually an experiment id, defines the name of the user directory.
"""
self._save_focused_output_files_as_nexus(instrument, sample_path, bank, sample_workspace,
rb_num, unit)
self._save_focused_output_files_as_gss(instrument, sample_path, bank, sample_workspace,
rb_num, unit)
self._save_focused_output_files_as_topas_xye(instrument, sample_path, bank, sample_workspace,
rb_num, unit)
output_path = path.join(path_handling.get_output_path(), 'Focus')
logger.notice(f"\n\nFocus files saved to: \"{output_path}\"\n\n")
if rb_num:
output_path = path.join(path_handling.get_output_path(), 'User', rb_num, 'Focus')
logger.notice(f"\n\nFocus files also saved to: \"{output_path}\"\n\n")
self._last_path = output_path
if self._last_path and self._last_path_ws:
self._last_path = path.join(self._last_path, self._last_path_ws)
def _save_focused_output_files_as_gss(self, instrument, sample_path, bank, sample_workspace,
rb_num, unit):
gss_output_path = path.join(
path_handling.get_output_path(), "Focus",
self._generate_output_file_name(instrument, sample_path, bank, unit, ".gss"))
SaveGSS(InputWorkspace=sample_workspace, Filename=gss_output_path)
if rb_num:
gss_output_path = path.join(
path_handling.get_output_path(), "User", rb_num, "Focus",
self._generate_output_file_name(instrument, sample_path, bank, unit, ".gss"))
SaveGSS(InputWorkspace=sample_workspace, Filename=gss_output_path)
def _save_focused_output_files_as_nexus(self, instrument, sample_path, bank, sample_workspace,
rb_num, unit):
file_name = self._generate_output_file_name(instrument, sample_path, bank, unit, ".nxs")
nexus_output_path = path.join(path_handling.get_output_path(), "Focus", file_name)
SaveNexus(InputWorkspace=sample_workspace, Filename=nexus_output_path)
if rb_num:
nexus_output_path = path.join(
path_handling.get_output_path(), "User", rb_num, "Focus", file_name)
SaveNexus(InputWorkspace=sample_workspace, Filename=nexus_output_path)
self._last_path_ws = file_name
def _save_focused_output_files_as_topas_xye(self, instrument, sample_path, bank,
sample_workspace, rb_num, unit):
xye_output_path = path.join(
path_handling.get_output_path(), "Focus",
self._generate_output_file_name(instrument, sample_path, bank, unit, ".abc"))
SaveFocusedXYE(InputWorkspace=sample_workspace,
Filename=xye_output_path,
SplitFiles=False,
Format="TOPAS")
if rb_num:
xye_output_path = path.join(
path_handling.get_output_path(), "User", rb_num, "Focus",
self._generate_output_file_name(instrument, sample_path, bank, unit, ".abc"))
SaveFocusedXYE(InputWorkspace=sample_workspace,
Filename=xye_output_path,
SplitFiles=False,
Format="TOPAS")
@staticmethod
def _output_sample_logs(instrument, run_number, workspace, rb_num):
def write_to_file():
with open(output_path, "w", newline="") as logfile:
                writer = csv.writer(logfile)
                writer.writerow(["Sample Log", "Avg Value"])
for log in output_dict:
writer.writerow([log, output_dict[log]])
output_dict = {}
sample_run = workspace.getRun()
log_names = sample_run.keys()
# Collect numerical sample logs.
for name in log_names:
try:
output_dict[name] = sample_run.getPropertyAsSingleValue(name)
except ValueError:
logger.information(f"Could not convert {name} to a numerical value. It will not be included in the "
f"sample logs output file.")
focus_dir = path.join(path_handling.get_output_path(), "Focus")
if not path.exists(focus_dir):
makedirs(focus_dir)
output_path = path.join(focus_dir, (instrument + "_" + run_number + "_sample_logs.csv"))
write_to_file()
if rb_num:
focus_user_dir = path.join(path_handling.get_output_path(), "User", rb_num, "Focus")
if not path.exists(focus_user_dir):
makedirs(focus_user_dir)
output_path = path.join(focus_user_dir, (instrument + "_" + run_number + "_sample_logs.csv"))
write_to_file()
@staticmethod
def _generate_output_file_name(instrument, sample_path, bank, unit, suffix):
run_no = path_handling.get_run_number_from_path(sample_path, instrument)
return instrument + '_' + run_no + '_' + "bank_" + bank + '_' + unit + suffix
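    # Worked example (hedged; the run number shown is whatever
    # path_handling.get_run_number_from_path extracts from the path):
    # _generate_output_file_name("ENGINX", "/data/ENGINX00305761.nxs", "1", "TOF", ".nxs")
    #     -> "ENGINX_305761_bank_1_TOF.nxs"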
| gpl-3.0 |
karthik-sethuraman/ONFOpenTransport | RI/flask_server/tapi_server/models/tapi_path_computation_context_augmentation3.py | 4 | 2512 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_path_computation_path_computation_context import TapiPathComputationPathComputationContext # noqa: F401,E501
from tapi_server import util
class TapiPathComputationContextAugmentation3(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, path_computation_context=None): # noqa: E501
"""TapiPathComputationContextAugmentation3 - a model defined in OpenAPI
:param path_computation_context: The path_computation_context of this TapiPathComputationContextAugmentation3. # noqa: E501
:type path_computation_context: TapiPathComputationPathComputationContext
"""
self.openapi_types = {
'path_computation_context': TapiPathComputationPathComputationContext
}
self.attribute_map = {
'path_computation_context': 'path-computation-context'
}
self._path_computation_context = path_computation_context
@classmethod
def from_dict(cls, dikt) -> 'TapiPathComputationContextAugmentation3':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The tapi.path.computation.ContextAugmentation3 of this TapiPathComputationContextAugmentation3. # noqa: E501
:rtype: TapiPathComputationContextAugmentation3
"""
return util.deserialize_model(dikt, cls)
@property
def path_computation_context(self):
"""Gets the path_computation_context of this TapiPathComputationContextAugmentation3.
:return: The path_computation_context of this TapiPathComputationContextAugmentation3.
:rtype: TapiPathComputationPathComputationContext
"""
return self._path_computation_context
@path_computation_context.setter
def path_computation_context(self, path_computation_context):
"""Sets the path_computation_context of this TapiPathComputationContextAugmentation3.
:param path_computation_context: The path_computation_context of this TapiPathComputationContextAugmentation3.
:type path_computation_context: TapiPathComputationPathComputationContext
"""
self._path_computation_context = path_computation_context
| apache-2.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/numpy/distutils/command/build_ext.py | 149 | 22493 | """ Modified version of build_ext that handles fortran source files.
"""
from __future__ import division, absolute_import, print_function
import os
import sys
from glob import glob
from distutils.dep_util import newer_group
from distutils.command.build_ext import build_ext as old_build_ext
from distutils.errors import DistutilsFileError, DistutilsSetupError,\
DistutilsError
from distutils.file_util import copy_file
from numpy.distutils import log
from numpy.distutils.exec_command import exec_command
from numpy.distutils.system_info import combine_paths
from numpy.distutils.misc_util import filter_sources, has_f_sources, \
has_cxx_sources, get_ext_source_files, \
get_numpy_include_dirs, is_sequence, get_build_architecture, \
msvc_version
from numpy.distutils.command.config_compiler import show_fortran_compilers
try:
set
except NameError:
from sets import Set as set
class build_ext (old_build_ext):
description = "build C/C++/F extensions (compile/link to build directory)"
user_options = old_build_ext.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
]
help_options = old_build_ext.help_options + [
('help-fcompiler', None, "list available Fortran compilers",
show_fortran_compilers),
]
def initialize_options(self):
old_build_ext.initialize_options(self)
self.fcompiler = None
self.parallel = None
def finalize_options(self):
if self.parallel:
try:
self.parallel = int(self.parallel)
except ValueError:
raise ValueError("--parallel/-j argument must be an integer")
# Ensure that self.include_dirs and self.distribution.include_dirs
# refer to the same list object. finalize_options will modify
# self.include_dirs, but self.distribution.include_dirs is used
# during the actual build.
# self.include_dirs is None unless paths are specified with
# --include-dirs.
# The include paths will be passed to the compiler in the order:
# numpy paths, --include-dirs paths, Python include path.
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
incl_dirs = self.include_dirs or []
if self.distribution.include_dirs is None:
self.distribution.include_dirs = []
self.include_dirs = self.distribution.include_dirs
self.include_dirs.extend(incl_dirs)
old_build_ext.finalize_options(self)
self.set_undefined_options('build', ('parallel', 'parallel'))
def run(self):
if not self.extensions:
return
# Make sure that extension sources are complete.
self.run_command('build_src')
if self.distribution.has_c_libraries():
if self.inplace:
if self.distribution.have_run.get('build_clib'):
log.warn('build_clib already run, it is too late to ' \
'ensure in-place build of build_clib')
build_clib = self.distribution.get_command_obj('build_clib')
else:
build_clib = self.distribution.get_command_obj('build_clib')
build_clib.inplace = 1
build_clib.ensure_finalized()
build_clib.run()
self.distribution.have_run['build_clib'] = 1
else:
self.run_command('build_clib')
build_clib = self.get_finalized_command('build_clib')
self.library_dirs.append(build_clib.build_clib)
else:
build_clib = None
# Not including C libraries to the list of
# extension libraries automatically to prevent
# bogus linking commands. Extensions must
# explicitly specify the C libraries that they use.
from distutils.ccompiler import new_compiler
from numpy.distutils.fcompiler import new_fcompiler
compiler_type = self.compiler
# Initialize C compiler:
self.compiler = new_compiler(compiler=compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
self.compiler.customize(self.distribution)
self.compiler.customize_cmd(self)
self.compiler.show_customization()
# Create mapping of libraries built by build_clib:
clibs = {}
if build_clib is not None:
for libname, build_info in build_clib.libraries or []:
if libname in clibs and clibs[libname] != build_info:
log.warn('library %r defined more than once,'\
' overwriting build_info\n%s... \nwith\n%s...' \
% (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
clibs[libname] = build_info
# .. and distribution libraries:
for libname, build_info in self.distribution.libraries or []:
if libname in clibs:
# build_clib libraries have a precedence before distribution ones
continue
clibs[libname] = build_info
# Determine if C++/Fortran 77/Fortran 90 compilers are needed.
# Update extension libraries, library_dirs, and macros.
all_languages = set()
for ext in self.extensions:
ext_languages = set()
c_libs = []
c_lib_dirs = []
macros = []
for libname in ext.libraries:
if libname in clibs:
binfo = clibs[libname]
c_libs += binfo.get('libraries', [])
c_lib_dirs += binfo.get('library_dirs', [])
for m in binfo.get('macros', []):
if m not in macros:
macros.append(m)
for l in clibs.get(libname, {}).get('source_languages', []):
ext_languages.add(l)
if c_libs:
new_c_libs = ext.libraries + c_libs
log.info('updating extension %r libraries from %r to %r'
% (ext.name, ext.libraries, new_c_libs))
ext.libraries = new_c_libs
ext.library_dirs = ext.library_dirs + c_lib_dirs
if macros:
log.info('extending extension %r defined_macros with %r'
% (ext.name, macros))
ext.define_macros = ext.define_macros + macros
# determine extension languages
if has_f_sources(ext.sources):
ext_languages.add('f77')
if has_cxx_sources(ext.sources):
ext_languages.add('c++')
l = ext.language or self.compiler.detect_language(ext.sources)
if l:
ext_languages.add(l)
# reset language attribute for choosing proper linker
if 'c++' in ext_languages:
ext_language = 'c++'
elif 'f90' in ext_languages:
ext_language = 'f90'
elif 'f77' in ext_languages:
ext_language = 'f77'
else:
ext_language = 'c' # default
if l and l != ext_language and ext.language:
log.warn('resetting extension %r language from %r to %r.' %
(ext.name, l, ext_language))
ext.language = ext_language
# global language
all_languages.update(ext_languages)
need_f90_compiler = 'f90' in all_languages
need_f77_compiler = 'f77' in all_languages
need_cxx_compiler = 'c++' in all_languages
# Initialize C++ compiler:
if need_cxx_compiler:
self._cxx_compiler = new_compiler(compiler=compiler_type,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
compiler = self._cxx_compiler
compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
compiler.customize_cmd(self)
compiler.show_customization()
self._cxx_compiler = compiler.cxx_compiler()
else:
self._cxx_compiler = None
# Initialize Fortran 77 compiler:
if need_f77_compiler:
ctype = self.fcompiler
self._f77_compiler = new_fcompiler(compiler=self.fcompiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90=False,
c_compiler=self.compiler)
fcompiler = self._f77_compiler
if fcompiler:
ctype = fcompiler.compiler_type
fcompiler.customize(self.distribution)
if fcompiler and fcompiler.get_version():
fcompiler.customize_cmd(self)
fcompiler.show_customization()
else:
self.warn('f77_compiler=%s is not available.' %
(ctype))
self._f77_compiler = None
else:
self._f77_compiler = None
# Initialize Fortran 90 compiler:
if need_f90_compiler:
ctype = self.fcompiler
self._f90_compiler = new_fcompiler(compiler=self.fcompiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
requiref90=True,
c_compiler = self.compiler)
fcompiler = self._f90_compiler
if fcompiler:
ctype = fcompiler.compiler_type
fcompiler.customize(self.distribution)
if fcompiler and fcompiler.get_version():
fcompiler.customize_cmd(self)
fcompiler.show_customization()
else:
self.warn('f90_compiler=%s is not available.' %
(ctype))
self._f90_compiler = None
else:
self._f90_compiler = None
# Build extensions
self.build_extensions()
def swig_sources(self, sources):
        # Do nothing. Swig sources have been handled in build_src command.
return sources
def build_extension(self, ext):
sources = ext.sources
if sources is None or not is_sequence(sources):
raise DistutilsSetupError(
("in 'ext_modules' option (extension '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % ext.name)
sources = list(sources)
if not sources:
return
fullname = self.get_ext_fullname(ext.name)
if self.inplace:
modpath = fullname.split('.')
package = '.'.join(modpath[0:-1])
base = modpath[-1]
build_py = self.get_finalized_command('build_py')
package_dir = build_py.get_package_dir(package)
ext_filename = os.path.join(package_dir,
self.get_ext_filename(base))
else:
ext_filename = os.path.join(self.build_lib,
self.get_ext_filename(fullname))
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_filename, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
c_sources, cxx_sources, f_sources, fmodule_sources = \
filter_sources(ext.sources)
if self.compiler.compiler_type=='msvc':
if cxx_sources:
# Needed to compile kiva.agg._agg extension.
extra_args.append('/Zm1000')
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
cxx_sources = []
# Set Fortran/C++ compilers for compilation and linking.
if ext.language=='f90':
fcompiler = self._f90_compiler
elif ext.language=='f77':
fcompiler = self._f77_compiler
else: # in case ext.language is c++, for instance
fcompiler = self._f90_compiler or self._f77_compiler
if fcompiler is not None:
fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(ext, 'extra_f77_compile_args') else []
fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(ext, 'extra_f90_compile_args') else []
cxx_compiler = self._cxx_compiler
# check for the availability of required compilers
if cxx_sources and cxx_compiler is None:
raise DistutilsError("extension %r has C++ sources" \
"but no C++ compiler found" % (ext.name))
if (f_sources or fmodule_sources) and fcompiler is None:
raise DistutilsError("extension %r has Fortran sources " \
"but no Fortran compiler found" % (ext.name))
if ext.language in ['f77', 'f90'] and fcompiler is None:
self.warn("extension %r has Fortran libraries " \
"but no Fortran linker found, using default linker" % (ext.name))
if ext.language=='c++' and cxx_compiler is None:
self.warn("extension %r has C++ libraries " \
"but no C++ linker found, using default linker" % (ext.name))
kws = {'depends':ext.depends}
output_dir = self.build_temp
include_dirs = ext.include_dirs + get_numpy_include_dirs()
c_objects = []
if c_sources:
log.info("compiling C sources")
c_objects = self.compiler.compile(c_sources,
output_dir=output_dir,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_args,
**kws)
if cxx_sources:
log.info("compiling C++ sources")
c_objects += cxx_compiler.compile(cxx_sources,
output_dir=output_dir,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_args,
**kws)
extra_postargs = []
f_objects = []
if fmodule_sources:
log.info("compiling Fortran 90 module sources")
module_dirs = ext.module_dirs[:]
module_build_dir = os.path.join(
self.build_temp, os.path.dirname(
self.get_ext_filename(fullname)))
self.mkpath(module_build_dir)
if fcompiler.module_dir_switch is None:
existing_modules = glob('*.mod')
extra_postargs += fcompiler.module_options(
module_dirs, module_build_dir)
f_objects += fcompiler.compile(fmodule_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs,
depends=ext.depends)
if fcompiler.module_dir_switch is None:
for f in glob('*.mod'):
if f in existing_modules:
continue
t = os.path.join(module_build_dir, f)
if os.path.abspath(f)==os.path.abspath(t):
continue
if os.path.isfile(t):
os.remove(t)
try:
self.move_file(f, module_build_dir)
except DistutilsFileError:
log.warn('failed to move %r to %r' %
(f, module_build_dir))
if f_sources:
log.info("compiling Fortran sources")
f_objects += fcompiler.compile(f_sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug,
extra_postargs=extra_postargs,
depends=ext.depends)
objects = c_objects + f_objects
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
libraries = self.get_libraries(ext)[:]
library_dirs = ext.library_dirs[:]
linker = self.compiler.link_shared_object
# Always use system linker when using MSVC compiler.
if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
# expand libraries with fcompiler libraries as we are
# not using fcompiler linker
self._libs_with_msvc_and_fortran(fcompiler, libraries, library_dirs)
elif ext.language in ['f77', 'f90'] and fcompiler is not None:
linker = fcompiler.link_shared_object
if ext.language=='c++' and cxx_compiler is not None:
linker = cxx_compiler.link_shared_object
linker(objects, ext_filename,
libraries=libraries,
library_dirs=library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=ext.language)
def _add_dummy_mingwex_sym(self, c_sources):
build_src = self.get_finalized_command("build_src").build_src
build_clib = self.get_finalized_command("build_clib").build_clib
objects = self.compiler.compile([os.path.join(build_src,
"gfortran_vs2003_hack.c")],
output_dir=self.build_temp)
self.compiler.create_static_lib(objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
c_library_dirs):
if fcompiler is None: return
for libname in c_libraries:
if libname.startswith('msvc'): continue
fileexists = False
for libdir in c_library_dirs or []:
libfile = os.path.join(libdir, '%s.lib' % (libname))
if os.path.isfile(libfile):
fileexists = True
break
if fileexists: continue
# make g77-compiled static libs available to MSVC
fileexists = False
for libdir in c_library_dirs:
libfile = os.path.join(libdir, 'lib%s.a' % (libname))
if os.path.isfile(libfile):
# copy libname.a file to name.lib so that MSVC linker
# can find it
libfile2 = os.path.join(self.build_temp, libname + '.lib')
copy_file(libfile, libfile2)
if self.build_temp not in c_library_dirs:
c_library_dirs.append(self.build_temp)
fileexists = True
break
if fileexists: continue
log.warn('could not find library %r in directories %s'
% (libname, c_library_dirs))
# Always use system linker when using MSVC compiler.
f_lib_dirs = []
for dir in fcompiler.library_dirs:
# correct path when compiling in Cygwin but with normal Win
# Python
if dir.startswith('/usr/lib'):
s, o = exec_command(['cygpath', '-w', dir], use_tee=False)
if not s:
dir = o
f_lib_dirs.append(dir)
c_library_dirs.extend(f_lib_dirs)
# make g77-compiled static libs available to MSVC
for lib in fcompiler.libraries:
if not lib.startswith('msvc'):
c_libraries.append(lib)
p = combine_paths(f_lib_dirs, 'lib' + lib + '.a')
if p:
dst_name = os.path.join(self.build_temp, lib + '.lib')
if not os.path.isfile(dst_name):
copy_file(p[0], dst_name)
if self.build_temp not in c_library_dirs:
c_library_dirs.append(self.build_temp)
def get_source_files (self):
self.check_extensions_list(self.extensions)
filenames = []
for ext in self.extensions:
filenames.extend(get_ext_source_files(ext))
return filenames
def get_outputs (self):
self.check_extensions_list(self.extensions)
outputs = []
for ext in self.extensions:
if not ext.sources:
continue
fullname = self.get_ext_fullname(ext.name)
outputs.append(os.path.join(self.build_lib,
self.get_ext_filename(fullname)))
return outputs
| bsd-3-clause |
davasqueza/eriskco_conector_CloudSQL | lib/jinja2/testsuite/inheritance.py | 414 | 8248 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.inheritance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the template inheritance feature.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, DictLoader, TemplateError
LAYOUTTEMPLATE = '''\
|{% block block1 %}block 1 from layout{% endblock %}
|{% block block2 %}block 2 from layout{% endblock %}
|{% block block3 %}
{% block block4 %}nested block 4 from layout{% endblock %}
{% endblock %}|'''
LEVEL1TEMPLATE = '''\
{% extends "layout" %}
{% block block1 %}block 1 from level1{% endblock %}'''
LEVEL2TEMPLATE = '''\
{% extends "level1" %}
{% block block2 %}{% block block5 %}nested block 5 from level2{%
endblock %}{% endblock %}'''
LEVEL3TEMPLATE = '''\
{% extends "level2" %}
{% block block5 %}block 5 from level3{% endblock %}
{% block block4 %}block 4 from level3{% endblock %}
'''
LEVEL4TEMPLATE = '''\
{% extends "level3" %}
{% block block3 %}block 3 from level4{% endblock %}
'''
WORKINGTEMPLATE = '''\
{% extends "layout" %}
{% block block1 %}
{% if false %}
{% block block2 %}
      this should work
{% endblock %}
{% endif %}
{% endblock %}
'''
DOUBLEEXTENDS = '''\
{% extends "layout" %}
{% extends "layout" %}
{% block block1 %}
{% if false %}
{% block block2 %}
      this should work
{% endblock %}
{% endif %}
{% endblock %}
'''
env = Environment(loader=DictLoader({
'layout': LAYOUTTEMPLATE,
'level1': LEVEL1TEMPLATE,
'level2': LEVEL2TEMPLATE,
'level3': LEVEL3TEMPLATE,
'level4': LEVEL4TEMPLATE,
'working': WORKINGTEMPLATE,
'doublee': DOUBLEEXTENDS,
}), trim_blocks=True)
class InheritanceTestCase(JinjaTestCase):
def test_layout(self):
tmpl = env.get_template('layout')
assert tmpl.render() == ('|block 1 from layout|block 2 from '
'layout|nested block 4 from layout|')
def test_level1(self):
tmpl = env.get_template('level1')
assert tmpl.render() == ('|block 1 from level1|block 2 from '
'layout|nested block 4 from layout|')
def test_level2(self):
tmpl = env.get_template('level2')
assert tmpl.render() == ('|block 1 from level1|nested block 5 from '
'level2|nested block 4 from layout|')
def test_level3(self):
tmpl = env.get_template('level3')
assert tmpl.render() == ('|block 1 from level1|block 5 from level3|'
'block 4 from level3|')
    def test_level4(self):
tmpl = env.get_template('level4')
assert tmpl.render() == ('|block 1 from level1|block 5 from '
'level3|block 3 from level4|')
def test_super(self):
env = Environment(loader=DictLoader({
'a': '{% block intro %}INTRO{% endblock %}|'
'BEFORE|{% block data %}INNER{% endblock %}|AFTER',
'b': '{% extends "a" %}{% block data %}({{ '
'super() }}){% endblock %}',
'c': '{% extends "b" %}{% block intro %}--{{ '
'super() }}--{% endblock %}\n{% block data '
'%}[{{ super() }}]{% endblock %}'
}))
tmpl = env.get_template('c')
assert tmpl.render() == '--INTRO--|BEFORE|[(INNER)]|AFTER'
    def test_working(self):
        # rendering is not checked here; loading the template must not raise
        tmpl = env.get_template('working')
def test_reuse_blocks(self):
tmpl = env.from_string('{{ self.foo() }}|{% block foo %}42'
'{% endblock %}|{{ self.foo() }}')
assert tmpl.render() == '42|42|42'
def test_preserve_blocks(self):
env = Environment(loader=DictLoader({
'a': '{% if false %}{% block x %}A{% endblock %}{% endif %}{{ self.x() }}',
'b': '{% extends "a" %}{% block x %}B{{ super() }}{% endblock %}'
}))
tmpl = env.get_template('b')
assert tmpl.render() == 'BA'
def test_dynamic_inheritance(self):
env = Environment(loader=DictLoader({
'master1': 'MASTER1{% block x %}{% endblock %}',
'master2': 'MASTER2{% block x %}{% endblock %}',
'child': '{% extends master %}{% block x %}CHILD{% endblock %}'
}))
tmpl = env.get_template('child')
for m in range(1, 3):
assert tmpl.render(master='master%d' % m) == 'MASTER%dCHILD' % m
def test_multi_inheritance(self):
env = Environment(loader=DictLoader({
'master1': 'MASTER1{% block x %}{% endblock %}',
'master2': 'MASTER2{% block x %}{% endblock %}',
'child': '''{% if master %}{% extends master %}{% else %}{% extends
'master1' %}{% endif %}{% block x %}CHILD{% endblock %}'''
}))
tmpl = env.get_template('child')
assert tmpl.render(master='master2') == 'MASTER2CHILD'
assert tmpl.render(master='master1') == 'MASTER1CHILD'
assert tmpl.render() == 'MASTER1CHILD'
def test_scoped_block(self):
env = Environment(loader=DictLoader({
'master.html': '{% for item in seq %}[{% block item scoped %}'
'{% endblock %}]{% endfor %}'
}))
t = env.from_string('{% extends "master.html" %}{% block item %}'
'{{ item }}{% endblock %}')
assert t.render(seq=list(range(5))) == '[0][1][2][3][4]'
def test_super_in_scoped_block(self):
env = Environment(loader=DictLoader({
'master.html': '{% for item in seq %}[{% block item scoped %}'
'{{ item }}{% endblock %}]{% endfor %}'
}))
t = env.from_string('{% extends "master.html" %}{% block item %}'
'{{ super() }}|{{ item * 2 }}{% endblock %}')
assert t.render(seq=list(range(5))) == '[0|0][1|2][2|4][3|6][4|8]'
def test_scoped_block_after_inheritance(self):
env = Environment(loader=DictLoader({
'layout.html': '''
{% block useless %}{% endblock %}
''',
'index.html': '''
{%- extends 'layout.html' %}
{% from 'helpers.html' import foo with context %}
{% block useless %}
{% for x in [1, 2, 3] %}
{% block testing scoped %}
{{ foo(x) }}
{% endblock %}
{% endfor %}
{% endblock %}
''',
'helpers.html': '''
{% macro foo(x) %}{{ the_foo + x }}{% endmacro %}
'''
}))
rv = env.get_template('index.html').render(the_foo=42).split()
assert rv == ['43', '44', '45']
class BugFixTestCase(JinjaTestCase):
def test_fixed_macro_scoping_bug(self):
assert Environment(loader=DictLoader({
'test.html': '''\
{% extends 'details.html' %}
{% macro my_macro() %}
my_macro
{% endmacro %}
{% block inner_box %}
{{ my_macro() }}
{% endblock %}
''',
'details.html': '''\
{% extends 'standard.html' %}
{% macro my_macro() %}
my_macro
{% endmacro %}
{% block content %}
{% block outer_box %}
outer_box
{% block inner_box %}
inner_box
{% endblock %}
{% endblock %}
{% endblock %}
''',
'standard.html': '''
{% block content %} {% endblock %}
'''
})).get_template("test.html").render().split() == [u'outer_box', u'my_macro']
def test_double_extends(self):
"""Ensures that a template with more than 1 {% extends ... %} usage
raises a ``TemplateError``.
"""
        try:
            tmpl = env.get_template('doublee')
        except Exception as e:
            assert isinstance(e, TemplateError)
        else:
            assert False, 'expected a TemplateError for duplicate {% extends %}'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(InheritanceTestCase))
suite.addTest(unittest.makeSuite(BugFixTestCase))
return suite
| apache-2.0 |
JoyTeam/metagam | mg/core/applications.py | 1 | 27519 | #!/usr/bin/python2.6
# This file is a part of Metagam project.
#
# Metagam is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Metagam is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metagam. If not, see <http://www.gnu.org/licenses/>.
from mg.core.cass import CassandraObject, CassandraObjectList
from concurrence import Tasklet, Timeout, TimeoutError
from concurrence.extra import Lock
from concurrence.http import HTTPConnection, HTTPError, HTTPRequest
from mg.core.tools import *
from mg.core.common import *
from mg.core.memcached import Memcached, MemcachedLock
from mg.core.config import Config
from operator import itemgetter
import weakref
import re
import cStringIO
import urlparse
import datetime
import gettext
import sys
import traceback
import time
re_hook_path = re.compile(r'^(.+?)\.(.+)$')
re_module_path = re.compile(r'^(.+)\.(.+)$')
re_remove_domain = re.compile(r'^.{,20}///')
class DBHookGroupModules(CassandraObject):
clsname = "HookGroupModules"
indexes = {
"all": [[]]
}
class DBHookGroupModulesList(CassandraObjectList):
objcls = DBHookGroupModules
class Hooks(object):
"""
    This class is a hook manager for an application. It keeps a list of loaded
    handlers and passes hook calls on to them.
"""
class Return(Exception):
"This exception is raised when a hook handler wants to return the value immediately"
def __init__(self, value=None):
self.value = value
def __init__(self, app):
self.handlers = dict()
self.loaded_groups = set()
self.app = weakref.ref(app)
self.dynamic = False
def load_groups(self, groups):
"""
Load all modules handling any hooks in the given groups
groups - list of hook group names
"""
t = Tasklet.current()
if getattr(t, "hooks_locked", False):
self._load_groups(groups)
else:
with self.app().hook_lock:
t.hooks_locked = True
self._load_groups(groups)
t.hooks_locked = False
def _load_groups(self, groups):
"""
The same as load_groups but without locking
"""
load_groups = [g for g in groups if (g != "all") and (g not in self.loaded_groups)]
if len(load_groups):
lst = self.app().objlist(DBHookGroupModulesList, load_groups)
lst.load(silent=True)
modules = set()
for obj in lst:
if obj.get("list"):
for mod in obj.get("list"):
modules.add(mod)
modules = list(modules)
if len(modules):
self.app().modules.load(modules, silent=True, auto_loaded=True)
for g in load_groups:
self.loaded_groups.add(g)
def register(self, module_name, hook_name, handler, priority=0, priv=None):
"""
Register hook handler
module_name - fully qualified module name
hook_name - hook name (format: "group.name")
handler - will be called on hook calls
priority - order of hooks execution
"""
lst = self.handlers.get(hook_name)
if lst is None:
lst = []
self.handlers[hook_name] = lst
lst.append((handler, priority, module_name, priv))
lst.sort(key=itemgetter(1), reverse=True)
def clear(self):
"Unregister all registered hooks"
self.handlers.clear()
self.loaded_groups.clear()
def call(self, name, *args, **kwargs):
"""
Call handlers of the hook
name - hook name ("group.name")
*args, **kwargs - arbitrary parameters passed to the handlers
Some special kwargs (they are not passed to the handlers):
        check_priv - require a permission setting on the handler
"""
if "check_priv" in kwargs:
check_priv = kwargs["check_priv"]
del kwargs["check_priv"]
else:
check_priv = None
m = re_hook_path.match(name)
if not m:
raise HookFormatError("Invalid hook name: %s" % name)
(hook_group, hook_name) = m.group(1, 2)
# ensure handling modules are loaded. "core" handlers are not loaded automatically
if self.dynamic and hook_group != "core" and hook_group not in self.loaded_groups and kwargs.get("load_handlers") is not False:
self.load_groups([hook_group])
if "load_handlers" in kwargs:
del kwargs["load_handlers"]
# call handlers
handlers = self.handlers.get(name)
ret = None
if handlers is not None:
for handler, priority, module_name, priv in handlers:
if check_priv:
if priv is None:
raise HandlerPermissionError("No privilege information in handler %s of module %s" % (name, module_name))
if priv == "public":
pass
elif priv == "logged":
self.call("session.require_login")
else:
self.call("session.require_login")
self.call("session.require_permission", priv)
try:
res = handler(*args, **kwargs)
if type(res) == tuple:
args = res
elif res is not None:
ret = res
except Hooks.Return as e:
return e.value
return ret
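    # Illustrative usage (module and hook names are hypothetical):
    #
    #   hooks.register("mg.example.Greeter", "greet.hello", say_hello, priority=10)
    #   result = hooks.call("greet.hello", "world")
    #
    # Handlers run in descending priority order. A handler may return a
    # tuple to replace the positional arguments for subsequent handlers,
    # return any other non-None value to become the result, or raise
    # Hooks.Return(value) to short-circuit the chain immediately.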
def store(self):
"""
        This method iterates over the installed handlers and stores a
        group => modules_list mapping into the database
"""
if not self.dynamic:
return
rec = dict()
for name, handlers in self.handlers.items():
m = re_hook_path.match(name)
if not m:
raise HookFormatError("Invalid hook name: %s" % name)
(hook_group, hook_name) = m.group(1, 2)
if hook_group != "core":
grpset = rec.get(hook_group)
if grpset is None:
grpset = rec[hook_group] = set()
for handler in handlers:
grpset.add(handler[2])
with self.app().hook_lock:
with self.app().lock(["HOOK-GROUPS"]):
t = Tasklet.current()
t.hooks_locked = True
old_groups = self.app().objlist(DBHookGroupModulesList, query_index="all")
for obj in old_groups:
if not obj.uuid in rec:
obj.remove()
groups = self.app().objlist(DBHookGroupModulesList, [])
for group, grpset in rec.iteritems():
if group != "all":
obj = self.app().obj(DBHookGroupModules, group, data={})
obj.set("list", list(grpset))
groups.append(obj)
groups.store(dont_load=True)
t.hooks_locked = False
class Module(Loggable):
"""
    Module is the main container for the software payload.
    A module can intercept and handle hooks to react to application events
"""
def __init__(self, app, fqn):
"""
app - an Application object
fqn - fully qualified module name (format: "group.Class")
"""
Loggable.__init__(self, fqn)
self.app = weakref.ref(app)
self.inst = app.inst
def db(self):
return self.app().db
@property
def sql_read(self):
return self.app().sql_read
@property
def sql_write(self):
return self.app().sql_write
def rhook(self, *args, **kwargs):
"Registers handler for the current module. Arguments: all for Hooks.register() without module name"
self.app().hooks.register(self.fqn, *args, **kwargs)
def rdep(self, modules):
"Register module dependency. This module will be loaded automatically"
self.app().modules._load(modules, auto_loaded=True)
def conf(self, key, default=None, reset_cache=False):
"Syntactic sugar for app.config.get(key)"
conf = self.app().config
if reset_cache:
conf.clear()
return conf.get(key, default)
def call(self, *args, **kwargs):
"Syntactic sugar for app.hooks.call(...)"
return self.app().hooks.call(*args, **kwargs)
def _register(self):
"Register all required event handlers"
self.rhook("core.loaded_modules", self.loaded_modules)
self.register()
def register(self):
pass
def loaded_modules(self, list):
"Appends name of the current module to the list"
list.append(self.fqn)
def ok(self):
"""Returns value of "ok" HTTP parameter"""
return self.req().param("ok")
def exception(self, exception, silent=False, *args):
if not silent:
self.logger.exception(exception, *args)
self.call("exception.report", exception)
def _(self, val):
try:
value = self.req().trans.gettext(val)
if type(value) == str:
value = unicode(value, "utf-8")
return re_remove_domain.sub('', value)
except AttributeError:
pass
return re_remove_domain.sub('', self.call("l10n.gettext", val))
def obj(self, *args, **kwargs):
return self.app().obj(*args, **kwargs)
def objlist(self, *args, **kwargs):
return self.app().objlist(*args, **kwargs)
def time(self):
try:
req = self.req()
except AttributeError:
return time.time()
try:
return req._current_time
except AttributeError:
t = time.time()
req._current_time = t
return t
def req(self):
return Tasklet.current().req
def nowmonth(self):
return self.app().nowmonth()
def nowdate(self):
return self.app().nowdate()
def now(self, add=0):
return self.app().now(add)
def now_local(self, add=0):
return self.app().now_local(add)
def yesterday_interval(self):
return self.app().yesterday_interval()
def lock(self, *args, **kwargs):
return self.app().lock(*args, **kwargs)
def int_app(self):
"Returns reference to the application 'int'"
return self.app().inst.int_app
def main_app(self):
"Returns reference to the application 'main'"
try:
return self._main_app
except AttributeError:
pass
self._main_app = self.app().inst.appfactory.get_by_tag("main")
return self._main_app
def child_modules(self):
return []
def stemmer(self):
try:
return self.req()._stemmer
except AttributeError:
pass
st = self.call("l10n.stemmer")
try:
self.req()._stemmer = st
except AttributeError:
pass
return st
def stem(self, word):
return self.stemmer().stemWord(word)
def httpfile(self, url):
"Downloads given URL and returns it wrapped in StringIO"
try:
return cStringIO.StringIO(self.download(url))
except DownloadError:
return cStringIO.StringIO("")
def download(self, url):
"Downloads given URL and returns it"
if url is None:
raise DownloadError()
if type(url) == unicode:
url = url.encode("utf-8")
url_obj = urlparse.urlparse(url, "http", False)
if url_obj.scheme != "http":
self.error("Scheme '%s' is not supported", url_obj.scheme)
elif url_obj.hostname is None:
self.error("Empty hostname: %s", url)
else:
cnn = HTTPConnection()
try:
with Timeout.push(50):
cnn.set_limit(20000000)
port = url_obj.port
if port is None:
port = 80
cnn.connect((url_obj.hostname, port))
request = cnn.get(url_obj.path + url_obj.query)
request.add_header("Connection", "close")
response = cnn.perform(request)
if response.status_code != 200:
self.error("Error downloading %s: %s %s", url, response.status_code, response.status)
return ""
return response.body
except TimeoutError:
self.error("Timeout downloading %s", url)
except Exception as e:
self.error("Error downloading %s: %s", url, str(e))
finally:
try:
cnn.close()
except Exception:
pass
raise DownloadError()
def webdav_delete(self, url):
"Downloads given URL and returns it"
if url is None:
return
if type(url) == unicode:
url = url.encode("utf-8")
url_obj = urlparse.urlparse(url, "http", False)
if url_obj.scheme != "http":
self.error("Scheme '%s' is not supported", url_obj.scheme)
elif url_obj.hostname is None:
self.error("Empty hostname: %s", url)
else:
cnn = HTTPConnection()
try:
with Timeout.push(50):
port = url_obj.port
if port is None:
port = 80
cnn.connect((url_obj.hostname, port))
request = HTTPRequest()
request.method = "DELETE"
request.path = url_obj.path + url_obj.query
request.host = url_obj.hostname
request.add_header("Connection", "close")
cnn.perform(request)
except TimeoutError:
self.error("Timeout deleting %s", url)
except Exception as e:
self.error("Error deleting %s: %s", url, str(e))
finally:
try:
cnn.close()
except Exception:
pass
def image_format(self, image):
if image.format == "JPEG":
return ("jpg", "image/jpeg")
elif image.format == "PNG":
return ("png", "image/png")
elif image.format == "GIF":
return ("gif", "image/gif")
else:
return (None, None)
def qevent(self, event, **kwargs):
self.call("quests.event", event, **kwargs)
def clconf(self, key, default=None):
return self.app().clconf(key, default)
@property
def main_host(self):
return self.app().main_host
def is_recursive(self, occurences=2):
"Returns True if the caller found twice in the stack frame"
try:
raise ZeroDivisionError
except ZeroDivisionError:
fr = sys.exc_info()[2].tb_frame.f_back
cnt = 0
caller = (fr.f_code.co_filename, fr.f_code.co_name)
while fr is not None:
code = fr.f_code
if caller == (code.co_filename, code.co_name):
cnt += 1
if cnt >= occurences:
return True
fr = fr.f_back
return False
class ModuleError(Exception):
"Error during module loading"
pass
class Modules(object):
"""
    This class is a modules manager for the application. It keeps a list of
    loaded modules and can load modules on demand
"""
def __init__(self, app):
self.app = weakref.ref(app)
self.modules_lock = Lock()
self.loaded_modules = dict()
self.not_auto_loaded = set()
self.modules_locked_by = None
def load(self, modules, silent=False, auto_loaded=False):
"""
Load requested modules.
        modules - list of module names (format: "mg.group.Class" means
            "import Class from mg.group")
        silent - don't fail on ImportError
        auto_loaded - remove these modules on full reload
"""
t = Tasklet.current()
if getattr(t, "modules_locked", False):
return self._load(modules, silent, auto_loaded)
else:
wasLocked = False
if self.modules_lock.is_locked():
wasLocked = True
with self.modules_lock:
self.modules_locked_by = traceback.format_stack()
t.modules_locked = True
res = self._load(modules, silent, auto_loaded)
t.modules_locked = False
self.modules_locked_by = None
return res
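    # Example (hypothetical name): _load(["mg.example.Greeter"]) imports the
    # "mg.example" module, instantiates its Greeter(app, fqn) class and calls
    # _register() on it so that its hook handlers become active.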
def _load(self, modules, silent=False, auto_loaded=False):
"The same as load but without locking"
errors = 0
app = self.app()
for mod in modules:
if not auto_loaded:
self.not_auto_loaded.add(mod)
if mod not in self.loaded_modules:
m = re_module_path.match(mod)
if not m:
raise ModuleError("Invalid module name: %s" % mod)
(module_name, class_name) = m.group(1, 2)
module = sys.modules.get(module_name)
app.inst.modules.add(module_name)
if not module:
try:
try:
__import__(module_name, globals(), locals(), [], -1)
except ImportError as e:
if silent:
logging.getLogger("%s:mg.core.Modules" % self.app().inst.instid).exception(e)
else:
raise
module = sys.modules.get(module_name)
except Exception as e:
errors += 1
module = sys.modules.get(module_name)
if module:
logging.getLogger("%s:mg.core.Modules" % self.app().inst.instid).exception(e)
else:
raise
if module:
cls = module.__dict__[class_name]
obj = cls(app, mod)
self.loaded_modules[mod] = obj
obj._register()
else:
app.inst.modules.remove(module_name)
return errors
def clear(self):
"Remove all modules"
with self.modules_lock:
self.loaded_modules.clear()
def load_all(self):
"Load all available modules"
with self.modules_lock:
self.modules_locked_by = traceback.format_stack()
t = Tasklet.current()
t.modules_locked = True
# removing automatically loaded modules
modules = []
complete = set()
for mod in self.loaded_modules.keys():
if mod in self.not_auto_loaded:
modules.append(mod)
self.loaded_modules.clear()
self.app().hooks.clear()
self._load(modules)
repeat = True
while repeat:
repeat = False
for name, mod in self.loaded_modules.items():
if name not in complete:
children = mod.child_modules()
self._load(children, auto_loaded=True, silent=True)
complete.add(name)
repeat = True
t.modules_locked = False
self.modules_locked_by = None
class ApplicationConfigUpdater(object):
"""
    This class holds configuration changes and applies
    them when store() is called
"""
def __init__(self, app):
self.app = app
self.params = {}
self.del_params = {}
def set(self, param, value):
self.params[param] = value
try:
del self.del_params[param]
except KeyError:
pass
def delete(self, param):
self.del_params[param] = True
try:
del self.params[param]
except KeyError:
pass
def get(self, param, default=None):
if param in self.del_params:
return None
return self.params.get(param, self.app.config.get(param, default))
def store(self, update_hooks=True, notify=True):
if self.params or self.del_params:
config = self.app.config
for key, value in self.params.iteritems():
config.set(key, value)
for key, value in self.del_params.iteritems():
config.delete(key)
if update_hooks:
self.app.store_config_hooks(notify)
else:
config.store()
if notify:
self.app.hooks.call("cluster.appconfig_changed")
self.params = {}
self.app.hooks.call("config.changed")
class Application(Loggable):
"""
    Application is anything that can process unified /group/hook/args
    HTTP requests, call hooks, and keep its own database with configuration,
    data and hooks
"""
def __init__(self, inst, tag, storage=None, keyspace=None, fqn="mg.core.applications.Application"):
"""
inst - Instance object
tag - Application tag
"""
Loggable.__init__(self, fqn)
if storage is None:
if tag == "int" or tag == "main":
storage = 1
else:
storage = 0
self.storage = storage
self.inst = inst
self.tag = tag
self.keyspace = keyspace
self.hooks = Hooks(self)
self.config = Config(self)
self.modules = Modules(self)
self.config_lock = Lock()
self.hook_lock = Lock()
self.dynamic = False
self.protocol = "http"
@property
def db(self):
try:
return self._db
except AttributeError:
pass
if self.storage == 2:
self._db = self.inst.dbpool.dbget(self.keyspace, self.mc, self.storage, self.tag)
else:
self._db = self.inst.dbpool.dbget(self.tag, self.mc, self.storage)
return self._db
@property
def mc(self):
try:
return self._mc
except AttributeError:
pass
self._mc = Memcached(self.inst.mcpool, prefix="%s-" % self.tag)
return self._mc
@property
def sql_read(self):
try:
return self._sql_read
except AttributeError:
pass
self._sql_read = self.inst.sql_read.dbget(self)
return self._sql_read
@property
def sql_write(self):
try:
return self._sql_write
except AttributeError:
pass
self._sql_write = self.inst.sql_write.dbget(self)
return self._sql_write
def obj(self, cls, uuid=None, data=None, silent=False):
"Create CassandraObject instance"
return cls(self.db, uuid=uuid, data=data, silent=silent)
def objlist(self, cls, uuids=None, **kwargs):
"Create CassandraObjectList instance"
return cls(self.db, uuids=uuids, **kwargs)
def lock(self, keys, patience=20, delay=0.1, ttl=30, reason=None):
return MemcachedLock(self.mc, keys, patience, delay, ttl, value_prefix=str(self.inst.instid) + "-", reason=reason)
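    # Typical usage (the key name is illustrative):
    #
    #   with app.lock(["User-%s" % user_uuid]):
    #       ...  # critical section guarded cluster-wide via memcached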
def nowmonth(self):
return datetime.datetime.utcnow().strftime("%Y-%m")
def nowdate(self):
return datetime.datetime.utcnow().strftime("%Y-%m-%d")
def now(self, add=0):
return (datetime.datetime.utcnow() + datetime.timedelta(seconds=add)).strftime("%Y-%m-%d %H:%M:%S")
def now_local(self, add=0):
now = self.hooks.call("l10n.now_local", add)
if not now:
return self.now(add)
return now.strftime("%Y-%m-%d %H:%M:%S")
def yesterday_interval(self):
now = datetime.datetime.utcnow()
yesterday = (now + datetime.timedelta(seconds=-86400)).strftime("%Y-%m-%d")
today = now.strftime("%Y-%m-%d")
return '%s 00:00:00' % yesterday, '%s 00:00:00' % today
def store_config_hooks(self, notify=True):
self.config.store()
self.modules.load_all()
self.hooks.store()
if notify:
self.hooks.call("cluster.appconfig_changed")
def config_updater(self):
return ApplicationConfigUpdater(self)
def clconf(self, key, default=None):
return self.inst.dbconfig.get(key, default)
@property
def main_host(self):
return self.inst.conf("metagam", "domain", "main")
def load(self, *args, **kwargs):
"Syntactic sugar for modules.load(...)"
return self.modules.load(*args, **kwargs)
def call(self, *args, **kwargs):
"Syntactic sugar for hooks.call(...)"
return self.hooks.call(*args, **kwargs)
class TaskletLock(Lock):
def __init__(self):
Lock.__init__(self)
self.locked_by = None
self.depth = None
def __enter__(self):
task = id(Tasklet.current())
if self.locked_by and self.locked_by == task:
self.depth += 1
return self
Lock.__enter__(self)
self.locked_by = task
self.depth = 0
return self
def __exit__(self, type, value, traceback):
self.depth -= 1
if self.depth <= 0:
self.locked_by = None
Lock.__exit__(self, type, value, traceback)
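# TaskletLock is re-entrant within a single tasklet: nested "with" blocks in
# the same tasklet only bump the depth counter instead of deadlocking, e.g.
#
#   lock = TaskletLock()
#   with lock:
#       with lock:  # same tasklet, no deadlock
#           pass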
class ApplicationFactory(object):
"""
    ApplicationFactory returns an Application object by its tag
"""
def __init__(self, inst):
self.inst = inst
self.applications = weakref.WeakValueDictionary()
self.lock = TaskletLock()
def add(self, app):
"Add application to the factory"
self.applications[app.tag] = app
self.added(app)
def added(self, app):
pass
def remove_by_tag(self, tag):
"Remove application from the factory by its tag"
try:
app = self.applications[tag]
except KeyError:
return
self.remove(app)
def remove(self, app):
"Remove application from the factory"
try:
del self.applications[app.tag]
except KeyError:
pass
def get_by_tag(self, tag, load=True):
"Find application by tag and load it"
tag = utf2str(tag)
# Query without locking
if not load:
return self.applications.get(tag)
with self.lock:
try:
return self.applications[tag]
except KeyError:
pass
app = self.load(tag)
if app is None:
return None
self.add(app)
return app
def load(self, tag):
"Load application if not yet"
return None
| gpl-3.0 |
vitan/hue | apps/oozie/src/oozie/urls.py | 4 | 9202 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
IS_URL_NAMESPACED = True
urlpatterns = patterns(
'oozie.views.editor',
url(r'^list_workflows/$', 'list_workflows', name='list_workflows'),
url(r'^list_trashed_workflows/$', 'list_trashed_workflows', name='list_trashed_workflows'),
url(r'^create_workflow/$', 'create_workflow', name='create_workflow'),
url(r'^edit_workflow/(?P<workflow>\d+)$', 'edit_workflow', name='edit_workflow'),
url(r'^delete_workflow$', 'delete_workflow', name='delete_workflow'),
url(r'^restore_workflow/$', 'restore_workflow', name='restore_workflow'),
url(r'^clone_workflow/(?P<workflow>\d+)$', 'clone_workflow', name='clone_workflow'),
url(r'^submit_workflow/(?P<workflow>\d+)$', 'submit_workflow', name='submit_workflow'),
url(r'^schedule_workflow/(?P<workflow>\d+)$', 'schedule_workflow', name='schedule_workflow'),
url(r'^import_workflow/$', 'import_workflow', name='import_workflow'),
url(r'^import_coordinator/$', 'import_coordinator', name='import_coordinator'),
url(r'^export_workflow/(?P<workflow>\d+)$', 'export_workflow', name='export_workflow'),
url(r'^list_coordinators/(?P<workflow_id>[-\w]+)?$', 'list_coordinators', name='list_coordinators'),
url(r'^list_trashed_coordinators/$', 'list_trashed_coordinators', name='list_trashed_coordinators'),
url(r'^create_coordinator/(?P<workflow>[-\w]+)?$', 'create_coordinator', name='create_coordinator'),
url(r'^edit_coordinator/(?P<coordinator>[-\w]+)$', 'edit_coordinator', name='edit_coordinator'),
url(r'^delete_coordinator$', 'delete_coordinator', name='delete_coordinator'),
url(r'^restore_coordinator$', 'restore_coordinator', name='restore_coordinator'),
url(r'^clone_coordinator/(?P<coordinator>\d+)$', 'clone_coordinator', name='clone_coordinator'),
url(r'^create_coordinator_dataset/(?P<coordinator>[-\w]+)$', 'create_coordinator_dataset', name='create_coordinator_dataset'),
url(r'^edit_coordinator_dataset/(?P<dataset>\d+)$', 'edit_coordinator_dataset', name='edit_coordinator_dataset'),
url(r'^create_coordinator_data/(?P<coordinator>[-\w]+)/(?P<data_type>(input|output))$', 'create_coordinator_data', name='create_coordinator_data'),
url(r'^submit_coordinator/(?P<coordinator>\d+)$', 'submit_coordinator', name='submit_coordinator'),
url(r'^list_bundles$', 'list_bundles', name='list_bundles'),
url(r'^list_trashed_bundles$', 'list_trashed_bundles', name='list_trashed_bundles'),
url(r'^create_bundle$', 'create_bundle', name='create_bundle'),
url(r'^edit_bundle/(?P<bundle>\d+)$', 'edit_bundle', name='edit_bundle'),
url(r'^submit_bundle/(?P<bundle>\d+)$', 'submit_bundle', name='submit_bundle'),
url(r'^clone_bundle/(?P<bundle>\d+)$', 'clone_bundle', name='clone_bundle'),
url(r'^delete_bundle$', 'delete_bundle', name='delete_bundle'),
url(r'^restore_bundle$', 'restore_bundle', name='restore_bundle'),
url(r'^create_bundled_coordinator/(?P<bundle>\d+)$', 'create_bundled_coordinator', name='create_bundled_coordinator'),
url(r'^edit_bundled_coordinator/(?P<bundle>\d+)/(?P<bundled_coordinator>\d+)$', 'edit_bundled_coordinator', name='edit_bundled_coordinator'),
url(r'^list_history$', 'list_history', name='list_history'), # Unused
url(r'^list_history/(?P<record_id>[-\w]+)$', 'list_history_record', name='list_history_record'),
url(r'^install_examples/$', 'install_examples', name='install_examples'),
url(r'^jasmine', 'jasmine'),
)
urlpatterns += patterns(
'oozie.views.editor2',
url(r'^editor/workflow/list/$', 'list_editor_workflows', name='list_editor_workflows'),
url(r'^editor/workflow/edit/$', 'edit_workflow', name='edit_workflow'),
url(r'^editor/workflow/new/$', 'new_workflow', name='new_workflow'),
url(r'^editor/workflow/delete/$', 'delete_job', name='delete_editor_workflow'),
url(r'^editor/workflow/copy/$', 'copy_workflow', name='copy_workflow'),
url(r'^editor/workflow/save/$', 'save_workflow', name='save_workflow'),
url(r'^editor/workflow/submit/(?P<doc_id>\d+)$', 'submit_workflow', name='editor_submit_workflow'),
url(r'^editor/workflow/new_node/$', 'new_node', name='new_node'),
url(r'^editor/workflow/add_node/$', 'add_node', name='add_node'),
url(r'^editor/workflow/parameters/$', 'workflow_parameters', name='workflow_parameters'),
url(r'^editor/workflow/action/parameters/$', 'action_parameters', name='action_parameters'),
url(r'^editor/workflow/gen_xml/$', 'gen_xml_workflow', name='gen_xml_workflow'),
url(r'^editor/workflow/open_v1/$', 'open_old_workflow', name='open_old_workflow'),
url(r'^editor/coordinator/list/$', 'list_editor_coordinators', name='list_editor_coordinators'),
url(r'^editor/coordinator/edit/$', 'edit_coordinator', name='edit_coordinator'),
url(r'^editor/coordinator/new/$', 'new_coordinator', name='new_coordinator'),
url(r'^editor/coordinator/delete/$', 'delete_job', name='delete_editor_coordinator'),
url(r'^editor/coordinator/copy/$', 'copy_coordinator', name='copy_coordinator'),
url(r'^editor/coordinator/save/$', 'save_coordinator', name='save_coordinator'),
url(r'^editor/coordinator/submit/(?P<doc_id>\d+)$', 'submit_coordinator', name='editor_submit_coordinator'),
url(r'^editor/coordinator/gen_xml/$', 'gen_xml_coordinator', name='gen_xml_coordinator'),
url(r'^editor/coordinator/open_v1/$', 'open_old_coordinator', name='open_old_coordinator'),
url(r'^editor/coordinator/parameters/$', 'coordinator_parameters', name='coordinator_parameters'),
url(r'^editor/bundle/list/$', 'list_editor_bundles', name='list_editor_bundles'),
url(r'^editor/bundle/edit/$', 'edit_bundle', name='edit_bundle'),
url(r'^editor/bundle/new/$', 'new_bundle', name='new_bundle'),
url(r'^editor/bundle/delete/$', 'delete_job', name='delete_editor_bundle'),
url(r'^editor/bundle/copy/$', 'copy_bundle', name='copy_bundle'),
url(r'^editor/bundle/save/$', 'save_bundle', name='save_bundle'),
url(r'^editor/bundle/submit/(?P<doc_id>\d+)$', 'submit_bundle', name='editor_submit_bundle'),
url(r'^editor/bundle/open_v1/$', 'open_old_bundle', name='open_old_bundle'),
)
urlpatterns += patterns(
'oozie.views.api',
url(r'^workflows$', 'workflows', name='workflows'),
url(r'^workflows/(?P<workflow>\d+)$', 'workflow', name='workflow'),
url(r'^workflows/(?P<workflow>\d+)/save$', 'workflow_save', name='workflow_save'),
url(r'^workflows/(?P<workflow>\d+)/actions$', 'workflow_actions', name='workflow_actions'),
url(r'^workflows/(?P<workflow>\d+)/nodes/(?P<node_type>\w+)/validate$', 'workflow_validate_node', name='workflow_validate_node'),
url(r'^workflows/autocomplete_properties/$', 'autocomplete_properties', name='autocomplete_properties'),
)
urlpatterns += patterns(
'oozie.views.dashboard',
url(r'^$', 'list_oozie_workflows', name='index'),
url(r'^list_oozie_workflows/$', 'list_oozie_workflows', name='list_oozie_workflows'),
url(r'^list_oozie_coordinators/$', 'list_oozie_coordinators', name='list_oozie_coordinators'),
url(r'^list_oozie_bundles/$', 'list_oozie_bundles', name='list_oozie_bundles'),
url(r'^list_oozie_workflow/(?P<job_id>[-\w]+)/$', 'list_oozie_workflow', name='list_oozie_workflow'),
url(r'^list_oozie_coordinator/(?P<job_id>[-\w]+)/$', 'list_oozie_coordinator', name='list_oozie_coordinator'),
url(r'^list_oozie_workflow_action/(?P<action>[-\w@]+)/$', 'list_oozie_workflow_action', name='list_oozie_workflow_action'),
url(r'^list_oozie_bundle/(?P<job_id>[-\w]+)$', 'list_oozie_bundle', name='list_oozie_bundle'),
url(r'^rerun_oozie_job/(?P<job_id>[-\w]+)/(?P<app_path>.+?)$', 'rerun_oozie_job', name='rerun_oozie_job'),
url(r'^rerun_oozie_coord/(?P<job_id>[-\w]+)/(?P<app_path>.+?)$', 'rerun_oozie_coordinator', name='rerun_oozie_coord'),
url(r'^rerun_oozie_bundle/(?P<job_id>[-\w]+)/(?P<app_path>.+?)$', 'rerun_oozie_bundle', name='rerun_oozie_bundle'),
url(r'^manage_oozie_jobs/(?P<job_id>[-\w]+)/(?P<action>(start|suspend|resume|kill|rerun|change))$', 'manage_oozie_jobs', name='manage_oozie_jobs'),
url(r'^bulk_manage_oozie_jobs/$', 'bulk_manage_oozie_jobs', name='bulk_manage_oozie_jobs'),
url(r'^submit_external_job/(?P<application_path>.+?)$', 'submit_external_job', name='submit_external_job'),
url(r'^get_oozie_job_log/(?P<job_id>[-\w]+)$', 'get_oozie_job_log', name='get_oozie_job_log'),
url(r'^list_oozie_info/$', 'list_oozie_info', name='list_oozie_info'),
url(r'^list_oozie_sla/$', 'list_oozie_sla', name='list_oozie_sla'),
)
| apache-2.0 |
screwt/tablib | tablib/packages/odf3/script.py | 56 | 1106 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import SCRIPTNS
from .element import Element
# ODF 1.0 section 12.4.1
# The <script:event-listener> element binds an event to a macro.
# Autogenerated
def EventListener(**args):
return Element(qname = (SCRIPTNS,'event-listener'), **args)
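# Minimal usage sketch. The attribute spelling below is an assumption based on
# the ODF 1.0 schema and odfpy conventions; verify it against this package's
# attribute converters before relying on it:
#
#   listener = EventListener(qattributes={
#       (SCRIPTNS, 'event-name'): 'dom:click',
#       (SCRIPTNS, 'language'): 'ooo:script',
#   })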
| mit |
django-leonardo/horizon | horizon/test/tests/notifications.py | 17 | 1885 | # Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from django.conf import settings
from horizon import exceptions
from horizon.notifications import JSONMessage
from horizon.test import helpers as test
class NotificationTests(test.TestCase):
MESSAGES_PATH = os.path.abspath(os.path.join(settings.ROOT_PATH,
'messages'))
def _test_msg(self, path, expected_level, expected_msg=''):
msg = JSONMessage(path)
msg.load()
self.assertEqual(expected_level, msg.level_name)
self.assertEqual(expected_msg, msg.message)
def test_warning_msg(self):
path = self.MESSAGES_PATH + '/test_warning.json'
self._test_msg(path, 'warning', 'warning message')
def test_info_msg(self):
path = self.MESSAGES_PATH + '/test_info.json'
self._test_msg(path, 'info', 'info message')
def test_invalid_msg_file(self):
path = self.MESSAGES_PATH + '/test_invalid.json'
with self.assertRaises(exceptions.MessageFailure):
msg = JSONMessage(path)
msg.load()
def test_invalid_msg_file_fail_silently(self):
path = self.MESSAGES_PATH + '/test_invalid.json'
msg = JSONMessage(path, fail_silently=True)
msg.load()
self.assertTrue(msg.failed)
| apache-2.0 |
Atomistica/user-gfmd | tests/TEST_Hertz_sc100_128x128/eval.py | 1 | 2434 | # ======================================================================
# USER-GFMD - Elastic half-space methods for LAMMPS
# https://github.com/Atomistica/user-gfmd
#
# Copyright (2011-2016,2021)
# Lars Pastewka <lars.pastewka@imtek.uni-freiburg>,
# Tristan A. Sharp and others.
# See the AUTHORS file in the top-level USER-GFMD directory.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
#! /usr/bin/env python
import glob
from math import pi, sqrt
import sys
import numpy as np
###
# Note: For the SC cubic solid with lattice constant a and identical first and
# second nearest neighbor springs of spring constant k, the contact modulus is
# E* = (8/3) k/a
# See: Saito, J. Phys. Soc. Jpn. 73, 1816 (2004)
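# The evaluation below applies the standard Hertz relations for a sphere of
# radius R pressed onto a half-space with contact modulus E* (note: further
# down, the variable 'a' denotes the Hertz contact radius, not the lattice
# constant used in the comment above):
#   a = (3 N R / (4 E*))**(1/3), p0 = 3 N / (2 pi a**2),
#   p(r) = p0 * sqrt(1 - (r/a)**2) for r < a.
# With k = 1 and lattice constant 1 in the units of this test, E* = 8/3,
# which is the value assigned to E below.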
R = 100.0
E = 8./3
###
fns = glob.glob('gfmd.*.r.f2.out')
fns.remove('gfmd.0.r.f2.out')
if len(fns) > 1:
raise RuntimeError('More than one GFMD output found. Not sure which one to use.')
f_xy = np.loadtxt(fns[0])
nx, ny = f_xy.shape
###
r0 = 3.0
rbins = [ r0 ]
r2 = r0
while r2 < nx/4:
r2 = sqrt(r2*r2+r0*r0)
rbins += [ r2 ]
###
x = np.arange(nx)+0.5
x = np.where(x > nx/2, x-nx, x)
y = np.arange(ny)+0.5
y = np.where(y > ny/2, y-ny, y)
r_xy = np.sqrt( (x**2).reshape(-1,1) + (y**2).reshape(1,-1) )
### Pressure as a function of distance
N = np.sum(f_xy)
a = R*(3./4*( N/(E*R**2) ))**(1./3)
p0 = 3*N/(2*pi*a*a)
### Compute residual
fa_xy = np.where(r_xy<a, p0*np.sqrt(1-(r_xy/a)**2), np.zeros_like(r_xy))
res = np.sum( (f_xy - fa_xy)**2 )
if len(sys.argv) == 2 and sys.argv[1] == '--dump':
print('Residual: ', res)
print('Dumping f.out...')
np.savetxt('f.out', np.transpose([r_xy.reshape(-1), f_xy.reshape(-1), fa_xy.reshape(-1)]))
if res > 1e-2:
raise RuntimeError('Residual outside bounds: res = %f' % res)
| gpl-2.0 |
8ojangles/grunt-template-project | node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py | 1824 | 3474 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'CONFIGURATION_NAME',
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'LIB_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
'SHARED_LIB_DIR',
'SHARED_LIB_PREFIX',
'SHARED_LIB_SUFFIX',
'STATIC_LIB_PREFIX',
'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}
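# Example (illustrative) invocation using this generator:
#   gyp -f gypd -D OS=linux project.gyp
# which writes the fully merged and expanded input back out as project.gypd
# (with the default empty suffix; see GenerateOutput below).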
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
| mit |
Axam/nsx-web | nailgun/nailgun/openstack/common/db/sqlalchemy/test_migrations.py | 7 | 11462 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import commands
import ConfigParser
import os
import urlparse
import sqlalchemy
import sqlalchemy.exc
from nailgun.openstack.common import lockutils
from nailgun.openstack.common import log as logging
from nailgun.openstack.common import test
LOG = logging.getLogger(__name__)
def _get_connect_string(backend, user, passwd, database):
"""Get database connection
Try to get a connection with a very specific set of values; if we succeed,
we'll run the tests, otherwise they are skipped
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
elif backend == "mysql":
backend = "mysql+mysqldb"
else:
raise Exception("Unrecognized backend: '%s'" % backend)
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
% {'backend': backend, 'user': user, 'passwd': passwd,
'database': database})
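# Illustrative result (not executed here; values are placeholders):
#   _get_connect_string("mysql", "openstack_citest", "secret", "test")
#   -> "mysql+mysqldb://openstack_citest:secret@localhost/test"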
def _is_backend_avail(backend, user, passwd, database):
try:
connect_uri = _get_connect_string(backend, user, passwd, database)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def _have_mysql(user, passwd, database):
present = os.environ.get('TEST_MYSQL_PRESENT')
if present is None:
return _is_backend_avail('mysql', user, passwd, database)
return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database):
present = os.environ.get('TEST_POSTGRESQL_PRESENT')
if present is None:
return _is_backend_avail('postgres', user, passwd, database)
return present.lower() in ('', 'true')
def get_db_connection_info(conn_pieces):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
return (user, password, database, host)
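# Illustrative parse (not executed here; values are placeholders):
#   get_db_connection_info(urlparse.urlparse("mysql://user:pw@localhost/testdb"))
#   -> ("user", "pw", "testdb", "localhost")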
class BaseMigrationTestCase(test.BaseTestCase):
"""Base class fort testing of migration utils."""
def __init__(self, *args, **kwargs):
super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
self.DEFAULT_CONFIG_FILE)
self.test_databases = {}
self.migration_api = None
def setUp(self):
super(BaseMigrationTestCase, self).setUp()
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
except ConfigParser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# We start each test case with a completely blank slate.
self._reset_databases()
def tearDown(self):
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_databases()
super(BaseMigrationTestCase, self).tearDown()
def execute_cmd(self, cmd=None):
status, output = commands.getstatusoutput(cmd)
LOG.debug(output)
self.assertEqual(0, status,
"Failed to run: %s\n%s" % (cmd, output))
@lockutils.synchronized('pgadmin', 'tests-', external=True)
def _reset_pg(self, conn_pieces):
(user, password, database, host) = get_db_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop the database; we can't
# drop the database we are connected to, so for such
# operations there is a special template1 database.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
sql = ("drop database if exists %s;") % database
droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(droptable)
sql = ("create database %s;") % database
createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(createtable)
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
def _reset_databases(self):
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
conn_pieces = urlparse.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
# the easiest and cleanest solution
db_path = conn_pieces.path.strip('/')
if os.path.exists(db_path):
os.unlink(db_path)
# No need to recreate the SQLite DB. SQLite will
# create it for us if it's not there...
elif conn_string.startswith('mysql'):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \
get_db_connection_info(conn_pieces)
sql = ("drop database if exists %(db)s; "
"create database %(db)s;") % {'db': database}
cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
"-e \"%(sql)s\"") % {'user': user, 'password': password,
'host': host, 'sql': sql}
self.execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
self._reset_pg(conn_pieces)
class WalkVersionsMixin(object):
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
# Place the database under version control
self.migration_api.version_control(engine, self.REPOSITORY,
self.INIT_VERSION)
self.assertEqual(self.INIT_VERSION,
self.migration_api.db_version(engine,
self.REPOSITORY))
LOG.debug('latest version is %s' % self.REPOSITORY.latest)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
for version in versions:
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version, with_data=True)
if snake_walk:
downgraded = self._migrate_down(
engine, version - 1, with_data=True)
if downgraded:
self._migrate_up(engine, version)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(versions):
# downgrade -> upgrade -> downgrade
downgraded = self._migrate_down(engine, version - 1)
if snake_walk and downgraded:
self._migrate_up(engine, version)
self._migrate_down(engine, version - 1)
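# Illustrative call order for versions [1, 2] with snake_walk=True and
# downgrade=True (derived from the loops above): up(1), down(0), up(1),
# up(2), down(1), up(2), then down(1), up(2), down(1), down(0), up(1), down(0).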
def _migrate_down(self, engine, version, with_data=False):
try:
self.migration_api.downgrade(engine, self.REPOSITORY, version)
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(
version, self.migration_api.db_version(engine, self.REPOSITORY))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%03d" % (version + 1), None)
if post_downgrade:
post_downgrade(engine)
return True
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _pre_upgrade_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%03d" % version, None)
if pre_upgrade:
data = pre_upgrade(engine)
self.migration_api.upgrade(engine, self.REPOSITORY, version)
self.assertEqual(version,
self.migration_api.db_version(engine,
self.REPOSITORY))
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if check:
check(engine, data)
except Exception:
LOG.error("Failed to migrate to version %s on engine %s" %
(version, engine))
raise
| apache-2.0 |
davidgbe/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 181 | 15664 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
import scipy.sparse as sp
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters should not be cenetered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(sp.issparse(Y))
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
jmr0/servo | tests/wpt/css-tests/tools/html5lib/html5lib/treeadapters/sax.py | 1835 | 1661 | from __future__ import absolute_import, division, unicode_literals
from xml.sax.xmlreader import AttributesNSImpl
from ..constants import adjustForeignAttributes, unadjustForeignAttributes
prefix_mapping = {}
for prefix, localName, namespace in adjustForeignAttributes.values():
if prefix is not None:
prefix_mapping[prefix] = namespace
def to_sax(walker, handler):
"""Call SAX-like content handler based on treewalker walker"""
handler.startDocument()
for prefix, namespace in prefix_mapping.items():
handler.startPrefixMapping(prefix, namespace)
for token in walker:
type = token["type"]
if type == "Doctype":
continue
elif type in ("StartTag", "EmptyTag"):
attrs = AttributesNSImpl(token["data"],
unadjustForeignAttributes)
handler.startElementNS((token["namespace"], token["name"]),
token["name"],
attrs)
if type == "EmptyTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type == "EndTag":
handler.endElementNS((token["namespace"], token["name"]),
token["name"])
elif type in ("Characters", "SpaceCharacters"):
handler.characters(token["data"])
elif type == "Comment":
pass
else:
assert False, "Unknown token type"
for prefix, namespace in prefix_mapping.items():
handler.endPrefixMapping(prefix)
handler.endDocument()
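# A minimal usage sketch (illustrative, not part of this module): stream an
# html5lib-parsed document into any SAX ContentHandler, here an XMLGenerator.
# Assumes html5lib's public parse()/getTreeWalker() API.
#
#   import html5lib
#   from xml.sax.saxutils import XMLGenerator
#   tree = html5lib.parse("<p>Hello</p>")
#   walker = html5lib.getTreeWalker("etree")
#   to_sax(walker(tree), XMLGenerator(open("out.xml", "w")))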
| mpl-2.0 |
gregvonkuster/tools-iuc | deprecated/tools/htseq/htseqsams2mx.py | 25 | 19952 | # May 2013
# Changed to htseq as the counting engine - wrapped so an arbitrary number of columns is created
# borged Simon Anders' "count.py" since we need a vector of counts rather than a new sam file as output
# note attribution for htseq and count.py:
# Written by Simon Anders (sanders@fs.tum.de), European Molecular Biology
# Laboratory (EMBL). (c) 2010. Released under the terms of the GNU General
# Public License v3. Part of the 'HTSeq' framework, version HTSeq-0.5.4p3
# updated ross lazarus august 2011 to NOT include region and to finesse the name as the region for bed3 format inputs
# also now sums all duplicate named regions and provides a summary of any collapsing as the info
# updated ross lazarus july 26 to respect the is_duplicate flag rather than try to second guess
# note Heng Li argues that removing dupes is a bad idea for RNA seq
# updated ross lazarus july 22 to count reads OUTSIDE each bed region during the processing of each bam
# added better sorting with decoration of a dict key later sorted and undecorated.
# code cleaned up and galaxified ross lazarus july 18 et seq.
# bams2mx.py - turns a series of bam files and a bed file into a matrix of counts. Usage: bams2mx.py <halfwindow> <bedfile.bed> <bam1.bam>
# <bam2.bam>
# uses pysam to read and count bam reads over each bed interval for each sample for speed
# still not so fast
# TODO options -shift -unique
#
"""
how this gets run:
(vgalaxy)galaxy@iaas1-int:~$ cat database/job_working_directory/027/27014/galaxy_27014.sh
#!/bin/sh
GALAXY_LIB="/data/extended/galaxy/lib"
if [ "$GALAXY_LIB" != "None" ]; then
if [ -n "$PYTHONPATH" ]; then
PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"
else
PYTHONPATH="$GALAXY_LIB"
fi
export PYTHONPATH
fi
cd /data/extended/galaxy/database/job_working_directory/027/27014
python /data/extended/galaxy/tools/rgenetics/htseqsams2mx.py -g "/data/extended/galaxy/database/files/034/dataset_34115.dat" -o "/data/extended/galaxy/database/files/034/dataset_34124.dat" -m "union" --id_attribute "gene_id" --feature_type "exon" --samf "'/data/extended/galaxy/database/files/033/dataset_33980.dat','T5A_C1PPHACXX_AGTTCC_L003_R1.fastq_bwa.sam'" --samf "'/data/extended/galaxy/database/files/033/dataset_33975.dat','T5A_C1PPHACXX_AGTTCC_L002_R1.fastq_bwa.sam'"; cd /data/extended/galaxy; /data/extended/galaxy/set_metadata.sh ./database/files /data/extended/galaxy/database/job_working_directory/027/27014 . /data/extended/galaxy/universe_wsgi.ini /data/tmp/tmpmwsElH /data/extended/galaxy/database/job_working_directory/027/27014/galaxy.json /data/extended/galaxy/database/job_working_directory/027/27014/metadata_in_HistoryDatasetAssociation_45202_sfOMGa,/data/extended/galaxy/database/job_working_directory/027/27014/metadata_kwds_HistoryDatasetAssociation_45202_gaMnxa,/data/extended/galaxy/database/job_working_directory/027/27014/metadata_out_HistoryDatasetAssociation_45202_kZPsZO,/data/extended/galaxy/database/job_working_directory/027/27014/metadata_results_HistoryDatasetAssociation_45202_bXU7IU,,/data/extended/galaxy/database/job_working_directory/027/27014/metadata_override_HistoryDatasetAssociation_45202_hyLAvh
echo $? > /data/extended/galaxy/database/job_working_directory/027/27014/galaxy_27014.ec
"""
import itertools
import optparse
import os
import sys
import time
import traceback
import warnings
import HTSeq
class Xcpt(Exception):
def __init__(self, msg):
self.msg = msg
def htseqMX(gff_filename, sam_filenames, colnames, sam_exts, sam_bais, opts):
"""
Code taken from count.py in Simon Anders' HTSeq distribution
Wrapped in a loop to accept multiple bam/sam files and their names from galaxy to
produce a matrix of contig counts by sample for downstream use in edgeR and DESeq tools
"""
class UnknownChrom( Exception ):
pass
def my_showwarning( message, category, filename, lineno=None, line=None ):
sys.stdout.write( "Warning: %s\n" % message )
def invert_strand( iv ):
iv2 = iv.copy()
if iv2.strand == "+":
iv2.strand = "-"
elif iv2.strand == "-":
iv2.strand = "+"
else:
raise ValueError("Illegal strand")
return iv2
def count_reads_in_features( sam_filenames, colnames, gff_filename, opts ):
""" Hacked version of htseq count.py
"""
if opts.quiet:
warnings.filterwarnings( action="ignore", module="HTSeq" )
features = HTSeq.GenomicArrayOfSets( "auto", opts.stranded != "no" )
mapqMin = int(opts.mapqMin)
counts = {}
nreads = 0
empty = 0
ambiguous = 0
notaligned = 0
lowqual = 0
nonunique = 0
filtered = 0 # new filter_extras - need a better way to do this - independent filter tool?
gff = HTSeq.GFF_Reader( gff_filename )
try:
for i, f in enumerate(gff):
if f.type == opts.feature_type:
try:
feature_id = f.attr[ opts.id_attribute ]
except KeyError:
try:
feature_id = f.attr[ 'gene_id' ]
except KeyError:
sys.exit( "Feature at row %d %s does not contain a '%s' attribute OR a gene_id attribute - faulty GFF?" %
( (i + 1), f.name, opts.id_attribute ) )
if opts.stranded != "no" and f.iv.strand == ".":
sys.exit( "Feature %s at %s does not have strand information but you are "
"running htseq-count in stranded mode. Use '--stranded=no'." %
( f.name, f.iv ) )
features[ f.iv ] += feature_id
counts[ feature_id ] = [0 for x in colnames] # we use sami as an index here to bump counts later
except:
sys.stderr.write( "Error occured in %s.\n" % gff.get_line_number_string() )
raise
if not opts.quiet:
sys.stdout.write( "%d GFF lines processed.\n" % i )
if len( counts ) == 0 and not opts.quiet:
sys.stdout.write( "Warning: No features of type '%s' found.\n" % opts.feature_type )
for sami, sam_filename in enumerate(sam_filenames):
colname = colnames[sami]
isbam = sam_exts[sami] == 'bam'
hasbai = sam_bais[sami] > ''
if hasbai:
tempname = os.path.splitext(os.path.basename(sam_filename))[0]
tempbam = '%s_TEMP.bam' % tempname
tempbai = '%s_TEMP.bai' % tempname
os.link(sam_filename, tempbam)
os.link(sam_bais[sami], tempbai)
try:
if isbam:
if hasbai:
read_seq = HTSeq.BAM_Reader( tempbam )
else:
read_seq = HTSeq.BAM_Reader( sam_filename )
else:
read_seq = HTSeq.SAM_Reader( sam_filename )
first_read = iter(read_seq).next()
pe_mode = first_read.paired_end
except:
if isbam:
print >> sys.stderr, "Error occured when reading first line of bam file %s colname=%s \n" % (sam_filename, colname )
else:
print >> sys.stderr, "Error occured when reading first line of sam file %s colname=%s \n" % (sam_filename, colname )
raise
try:
if pe_mode:
read_seq_pe_file = read_seq
read_seq = HTSeq.pair_SAM_alignments( read_seq )
for seqi, r in enumerate(read_seq):
nreads += 1
if not pe_mode:
if not r.aligned:
notaligned += 1
continue
try:
if len(opts.filter_extras) > 0:
# note: a bare 'continue' here would only advance the inner loop,
# so use a flag to actually skip the filtered read
skip_read = False
for extra in opts.filter_extras:
if r.optional_field(extra):
filtered += 1
skip_read = True
break
if skip_read:
continue
if r.optional_field( "NH" ) > 1:
nonunique += 1
continue
except KeyError:
pass
if r.aQual < mapqMin:
lowqual += 1
continue
if opts.stranded != "reverse":
iv_seq = ( co.ref_iv for co in r.cigar if co.type == "M" and co.size > 0 )
else:
iv_seq = ( invert_strand( co.ref_iv ) for co in r.cigar if co.type == "M" and co.size > 0 )
else:
if r[0] is not None and r[0].aligned:
if opts.stranded != "reverse":
iv_seq = ( co.ref_iv for co in r[0].cigar if co.type == "M" and co.size > 0 )
else:
iv_seq = ( invert_strand( co.ref_iv ) for co in r[0].cigar if co.type == "M" and co.size > 0 )
else:
iv_seq = tuple()
if r[1] is not None and r[1].aligned:
if opts.stranded != "reverse":
iv_seq = itertools.chain( iv_seq,
( invert_strand( co.ref_iv ) for co in r[1].cigar if co.type == "M" and co.size > 0 ) )
else:
iv_seq = itertools.chain( iv_seq,
( co.ref_iv for co in r[1].cigar if co.type == "M" and co.size > 0 ) )
else:
if r[0] is None or not r[0].aligned:
notaligned += 1
continue
try:
if ( r[0] is not None and r[0].optional_field( "NH" ) > 1 ) or \
( r[1] is not None and r[1].optional_field( "NH" ) > 1 ):
nonunique += 1
continue
except KeyError:
pass
if ( r[0] and r[0].aQual < mapqMin ) or ( r[1] and r[1].aQual < mapqMin ):
lowqual += 1
continue
try:
if opts.mode == "union":
fs = set()
for iv in iv_seq:
if iv.chrom not in features.chrom_vectors:
raise UnknownChrom
for iv2, fs2 in features[ iv ].steps():
fs = fs.union( fs2 )
elif opts.mode == "intersection-strict" or opts.mode == "intersection-nonempty":
fs = None
for iv in iv_seq:
if iv.chrom not in features.chrom_vectors:
raise UnknownChrom
for iv2, fs2 in features[ iv ].steps():
if len(fs2) > 0 or opts.mode == "intersection-strict":
if fs is None:
fs = fs2.copy()
else:
fs = fs.intersection( fs2 )
else:
sys.exit( "Illegal overlap mode %s" % opts.mode )
if fs is None or len( fs ) == 0:
empty += 1
elif len( fs ) > 1:
ambiguous += 1
else:
ck = list(fs)[0]
counts[ck][sami] += 1 # end up with counts for each sample as a list
except UnknownChrom:
if not pe_mode:
rr = r
else:
rr = r[0] if r[0] is not None else r[1]
empty += 1
if not opts.quiet:
sys.stdout.write( ( "Warning: Skipping read '%s', because chromosome " +
"'%s', to which it has been aligned, did not appear in the GFF file.\n" ) %
( rr.read.name, iv.chrom ) )
except:
if not pe_mode:
sys.stderr.write( "Error occured in %s.\n" % read_seq.get_line_number_string() )
else:
sys.stderr.write( "Error occured in %s.\n" % read_seq_pe_file.get_line_number_string() )
raise
if not opts.quiet:
sys.stdout.write( "%d sam %s processed for %s.\n" % ( seqi, "lines " if not pe_mode else "line pairs", colname ) )
return counts, empty, ambiguous, lowqual, notaligned, nonunique, filtered, nreads
warnings.showwarning = my_showwarning
assert os.path.isfile(gff_filename), '## unable to open supplied gff file %s' % gff_filename
try:
counts, empty, ambiguous, lowqual, notaligned, nonunique, filtered, nreads = count_reads_in_features( sam_filenames, colnames, gff_filename, opts)
except:
sys.stderr.write( "Error: %s\n" % str( sys.exc_info()[1] ) )
sys.stderr.write( "[Exception type: %s, raised in %s:%d]\n" %
( sys.exc_info()[1].__class__.__name__,
os.path.basename(traceback.extract_tb( sys.exc_info()[2] )[-1][0]),
traceback.extract_tb( sys.exc_info()[2] )[-1][1] ) )
sys.exit( 1 )
return counts, empty, ambiguous, lowqual, notaligned, nonunique, filtered, nreads
def usage():
print >> sys.stdout, """Usage: python htseqsams2mx.py -w <halfwindowsize> -g <gfffile.gff> -o <outfilename> [-i] [-c] --samf "<sam1.sam>,<sam1.column_header>" --samf "...<samN.column_header>" """
sys.exit(1)
if __name__ == "__main__":
"""
<command interpreter="python">
htseqsams2mx.py -w "$halfwin" -g "$gfffile" -o "$outfile" -m "union"
#for $s in $samfiles:
--samf "'${s.samf}','${s.samf.name}'"
#end for
</command>
"""
if len(sys.argv) < 2:
usage()
sys.exit(1)
starttime = time.time()
op = optparse.OptionParser()
# All tools
op.add_option('-w', '--halfwindow', default="0")
op.add_option('-m', '--mode', default="union")
op.add_option('-s', '--stranded', default="no")
op.add_option('-y', '--feature_type', default="exon")
op.add_option('-g', '--gff_file', default=None)
op.add_option('-o', '--outfname', default=None)
op.add_option('-f', '--forceName', default="false")
op.add_option('--samf', default=[], action="append")
op.add_option('--filter_extras', default=[], action="append")
op.add_option('--mapqMin', default='0')
op.add_option( "-t", "--type", type="string", dest="featuretype",
default="exon", help="feature type (3rd column in GFF file) to be used, " +
"all features of other type are ignored (default, suitable for Ensembl " +
"GTF files: exon)" )
op.add_option( "-i", "--id_attribute", type="string", dest="id_attribute",
default="gene_name", help="GTF attribute to be used as feature ID (default, " +
"suitable for Ensembl GTF files: gene_id)" )
op.add_option( "-q", "--quiet", action="store_true", dest="quiet", default=False,
help="suppress progress report and warnings" )
opts, args = op.parse_args()
halfwindow = int(opts.halfwindow)
gff_file = opts.gff_file
assert os.path.isfile(gff_file), '##ERROR htseqsams2mx: Supplied input GFF file "%s" not found' % gff_file
outfname = opts.outfname
sam_filenames = []
colnames = []
samf = opts.samf
samfsplit = [x.split(',') for x in samf] # one per samf set
samsets = []
for samfs in samfsplit:
samset = [x.replace("'", "") for x in samfs]
samset = [x.replace('"', '') for x in samset]
samsets.append(samset)
samsets = [x for x in samsets if x[0].lower() != 'none']
# 'none' entries keep appearing on the command line - an artifact of the cheetah repeat group
samfnames = [x[0] for x in samsets]
if len(set(samfnames)) != len(samfnames):
samnames = []
delme = []
for i, s in enumerate(samfnames):
if s in samnames:
delme.append(i)
print >> sys.stdout, '## WARNING htseqsams2mx: Duplicate input sam file %s in %s - ignoring dupe in 0 based position %s' %\
(s, ','.join(samfnames), str(delme))
else:
samnames.append(s) # first time
samsets = [x for i, x in enumerate(samsets) if i not in delme]
samfnames = [x[0] for x in samsets]
scolnames = [x[1] for x in samsets]
assert len(samfnames) == len(scolnames), '##ERROR sams2mx: Count of sam/cname not consistent - %d/%d' % (len(samfnames), len(scolnames))
sam_exts = [x[2] for x in samsets]
assert len(samfnames) == len(sam_exts), '##ERROR sams2mx: Count of extensions not consistent - %d/%d' % (len(samfnames), len(sam_exts))
sam_bais = [x[3] for x in samsets] # these only exist for bams and need to be finessed with a symlink so pysam will just work
for i, b in enumerate(samfnames):
assert os.path.isfile(b), '## Supplied input sam file "%s" not found' % b
sam_filenames.append(b)
sampName = scolnames[i] # better be unique
sampName = sampName.replace('#', '') # for R
sampName = sampName.replace('(', '') # for R
sampName = sampName.replace(')', '') # for R
sampName = sampName.replace(' ', '_') # for R
colnames.append(sampName)
counts, empty, ambiguous, lowqual, notaligned, nonunique, filtered, nreads = htseqMX(gff_file, sam_filenames, colnames, sam_exts, sam_bais, opts)
heads = '\t'.join(['Contig', ] + colnames)
res = [heads, ]
contigs = counts.keys()
contigs.sort()
totalc = 0
emptycontigs = 0
for contig in contigs:
thisc = sum(counts[contig])
if thisc > 0: # no output for empty contigs
totalc += thisc
crow = [contig, ] + ['%d' % x for x in counts[contig]]
res.append('\t'.join(crow))
else:
emptycontigs += 1
outf = open(opts.outfname, 'w')
outf.write('\n'.join(res))
outf.write('\n')
outf.close()
walltime = int(time.time() - starttime)
accumulatornames = ('walltime (seconds)', 'total reads read', 'total reads counted', 'number of contigs', 'total empty reads', 'total ambiguous reads', 'total low quality reads',
'total not aligned reads', 'total not unique mapping reads', 'extra filtered reads', 'empty contigs')
accums = (walltime, nreads, totalc, len(contigs), empty, ambiguous, lowqual, notaligned, nonunique, filtered, emptycontigs)
fracs = (1.0, 1.0, float(totalc) / nreads, 1.0, float(empty) / nreads, float(ambiguous) / nreads, float(lowqual) / nreads, float(notaligned) / nreads, float(nonunique) / nreads, float(filtered) / nreads, float(emptycontigs) / len(contigs))
notes = ['%s = %d (%2.3f)' % (accumulatornames[i], x, 100.0 * fracs[i]) for i, x in enumerate(accums)]
print >> sys.stdout, '\n'.join(notes)
sys.exit(0)
| mit |
aldencolerain/boringmanclan | project/views/profiles.py | 1 | 1401 | from django.contrib.auth import authenticate, login, update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from project.extensions.shortcuts import sensitive
from project.forms.profile_forms import *
import logging
logger = logging.getLogger(__name__)
@sensitive
def new(request):
form = CreateProfileForm()
return render(request, 'profiles/new.html', {'form': form})
@sensitive
def create(request):
form = CreateProfileForm(request.POST)
if form.is_valid():
username = form.cleaned_data['username']
password = form.cleaned_data['password1']
user = form.save(commit=False)
user.set_password(password)
user.save()
user = authenticate(username=username, password=password)
login(request, user)
return redirect('home.index')
return render(request, 'profiles/new.html', {'form': form})
@sensitive
@login_required
def edit(request):
form = EditProfileForm(request.user)
return render(request, 'profiles/edit.html', {'form': form})
@sensitive
@login_required
def update(request):
form = EditProfileForm(request.user, data=request.POST)
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
return redirect('profiles.edit')
return render(request, 'profiles/edit.html', {'form': form}) | mit |
dbaxa/GitPython | git/index/base.py | 3 | 48462 | # index.py
# Copyright (C) 2008, 2009 Michael Trier (mtrier@gmail.com) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import tempfile
import os
import sys
import subprocess
import glob
from cStringIO import StringIO
from stat import S_ISLNK
from typ import (
BaseIndexEntry,
IndexEntry,
)
from util import (
TemporaryFileSwap,
post_clear_cache,
default_index,
git_working_dir
)
import git.objects
import git.diff as diff
from git.exc import (
GitCommandError,
CheckoutError
)
from git.objects import (
Blob,
Submodule,
Tree,
Object,
Commit,
)
from git.objects.util import Serializable
from git.util import (
IndexFileSHA1Writer,
LazyMixin,
LockedFD,
join_path_native,
file_contents_ro,
to_native_path_linux,
to_native_path
)
from fun import (
entry_key,
write_cache,
read_cache,
aggressive_tree_merge,
write_tree_from_cache,
stat_mode_to_index_mode,
S_IFGITLINK
)
from gitdb.base import IStream
from gitdb.db import MemoryDB
from gitdb.util import to_bin_sha
from itertools import izip
__all__ = ( 'IndexFile', 'CheckoutError' )
class IndexFile(LazyMixin, diff.Diffable, Serializable):
"""
Implements an Index that can be manipulated using a native implementation in
order to save git command function calls wherever possible.
It provides custom merging facilities allowing to merge without actually changing
your index or your working tree. This way you can perform your own test-merges based
on the index only without having to deal with the working copy. This is useful
in case of partial working trees.
``Entries``
The index contains an entries dict whose keys are tuples of type IndexEntry
to facilitate access.
You may read the entries dict or manipulate it using IndexEntry instance, i.e.::
index.entries[index.entry_key(index_entry_instance)] = index_entry_instance
Make sure you use index.write() once you are done manipulating the index directly
before operating on it using the git command"""
__slots__ = ("repo", "version", "entries", "_extension_data", "_file_path")
_VERSION = 2 # latest version we support
S_IFGITLINK = S_IFGITLINK # a submodule
def __init__(self, repo, file_path=None):
"""Initialize this Index instance, optionally from the given ``file_path``.
If no file_path is given, we will be created from the current index file.
If a stream is not given, the stream will be initialized from the current
repository's index on demand."""
self.repo = repo
self.version = self._VERSION
self._extension_data = ''
self._file_path = file_path or self._index_path()
def _set_cache_(self, attr):
if attr == "entries":
# read the current index
# try memory map for speed
lfd = LockedFD(self._file_path)
try:
fd = lfd.open(write=False, stream=False)
except OSError:
lfd.rollback()
# in new repositories, there may be no index, which means we are empty
self.entries = dict()
return
# END exception handling
# Here it comes: on windows in python 2.5, memory maps aren't closed properly
# Hence we are in trouble if we try to delete a file that is memory mapped,
# which happens during read-tree.
# In this case, we will just read the memory in directly.
# It's insanely bad ... I am disappointed!
allow_mmap = (os.name != 'nt' or sys.version_info[1] > 5)
stream = file_contents_ro(fd, stream=True, allow_mmap=allow_mmap)
try:
self._deserialize(stream)
finally:
lfd.rollback()
# The handles will be closed on destruction
# END read from default index on demand
else:
super(IndexFile, self)._set_cache_(attr)
def _index_path(self):
return join_path_native(self.repo.git_dir, "index")
@property
def path(self):
""" :return: Path to the index file we are representing """
return self._file_path
def _delete_entries_cache(self):
"""Safely clear the entries cache so it can be recreated"""
try:
del(self.entries)
except AttributeError:
# fails in python 2.6.5 with this exception
pass
# END exception handling
#{ Serializable Interface
def _deserialize(self, stream):
"""Initialize this instance with index values read from the given stream"""
self.version, self.entries, self._extension_data, content_sha = read_cache(stream)
return self
def _entries_sorted(self):
""":return: list of entries, in a sorted fashion, first by path, then by stage"""
entries_sorted = self.entries.values()
entries_sorted.sort(key=lambda e: (e.path, e.stage)) # use path/stage as sort key
return entries_sorted
def _serialize(self, stream, ignore_tree_extension_data=False):
entries = self._entries_sorted()
write_cache(entries,
stream,
(ignore_tree_extension_data and None) or self._extension_data)
return self
#} END serializable interface
def write(self, file_path = None, ignore_tree_extension_data=False):
"""Write the current state to our file path or to the given one
:param file_path:
If None, we will write to our stored file path from which we have
been initialized. Otherwise we write to the given file path.
Please note that this will change the file_path of this index to
the one you gave.
:param ignore_tree_extension_data:
If True, the TREE type extension data read in the index will not
be written to disk. Use this if you have altered the index and
would like to use git-write-tree afterwards to create a tree
representing your written changes.
If this data is present in the written index, git-write-tree
will instead write the stored/cached tree.
Alternatively, use IndexFile.write_tree() to handle this case
automatically
:return: self"""
# make sure we have our entries read before getting a write lock
# else it would be done when streaming. This can happen
# if one doesn't change the index, but writes it right away
self.entries
lfd = LockedFD(file_path or self._file_path)
stream = lfd.open(write=True, stream=True)
self._serialize(stream, ignore_tree_extension_data)
lfd.commit()
# make sure we represent what we have written
if file_path is not None:
self._file_path = file_path
@post_clear_cache
@default_index
def merge_tree(self, rhs, base=None):
"""Merge the given rhs treeish into the current index, possibly taking
a common base treeish into account.
As opposed to the from_tree method, this allows you to use an already
existing tree as the left side of the merge
:param rhs:
treeish reference pointing to the 'other' side of the merge.
:param base:
optional treeish reference pointing to the common base of 'rhs' and
this index which equals lhs
:return:
self ( containing the merge and possibly unmerged entries in case of
conflicts )
:raise GitCommandError:
If there is a merge conflict. The error will
be raised at the first conflicting path. If you want to have proper
merge resolution to be done by yourself, you have to commit the changed
index ( or make a valid tree from it ) and retry with a three-way
index.from_tree call. """
# -i : ignore working tree status
# --aggressive : handle more merge cases
# -m : do an actual merge
args = ["--aggressive", "-i", "-m"]
if base is not None:
args.append(base)
args.append(rhs)
self.repo.git.read_tree(args)
return self
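# Usage sketch (illustrative; 'topic' and the base sha are placeholders):
#   repo.index.merge_tree('topic', base='<merge-base-sha>').write_tree()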
@classmethod
def new(cls, repo, *tree_sha):
""" Merge the given treeish revisions into a new index which is returned.
This method behaves like git-read-tree --aggressive when doing the merge.
:param repo: The repository treeish are located in.
:param tree_sha:
20 byte or 40 byte tree sha or tree objects
:return:
New IndexFile instance. Its path will be undefined.
If you intend to write such a merged Index, supply an alternate file_path
to its 'write' method."""
base_entries = aggressive_tree_merge(repo.odb, [to_bin_sha(str(t)) for t in tree_sha])
inst = cls(repo)
# convert to entries dict
entries = dict(izip(((e.path, e.stage) for e in base_entries),
(IndexEntry.from_base(e) for e in base_entries)))
inst.entries = entries
return inst
@classmethod
def from_tree(cls, repo, *treeish, **kwargs):
"""Merge the given treeish revisions into a new index which is returned.
The original index will remain unaltered
:param repo:
The repository treeish are located in.
:param treeish:
One, two or three Tree Objects, Commits or 40 byte hexshas. The result
changes according to the amount of trees.
If 1 Tree is given, it will just be read into a new index
If 2 Trees are given, they will be merged into a new index using a
two way merge algorithm. Tree 1 is the 'current' tree, tree 2 is the 'other'
one. It behaves like a fast-forward.
If 3 Trees are given, a 3-way merge will be performed with the first tree
being the common ancestor of tree 2 and tree 3. Tree 2 is the 'current' tree,
tree 3 is the 'other' one
:param kwargs:
Additional arguments passed to git-read-tree
:return:
New IndexFile instance. It will point to a temporary index location which
does not exist anymore. If you intend to write such a merged Index, supply
an alternate file_path to its 'write' method.
:note:
In the three-way merge case, --aggressive will be specified to automatically
resolve more cases in a commonly correct manner. Specify trivial=True as kwarg
to override that.
As the underlying git-read-tree command takes into account the current index,
it will be temporarily moved out of the way to assure there is no unexpected
interference."""
if len(treeish) == 0 or len(treeish) > 3:
raise ValueError("Please specify between 1 and 3 treeish, got %i" % len(treeish))
arg_list = list()
# ignore that working tree and index possibly are out of date
if len(treeish)>1:
# drop unmerged entries when reading our index and merging
arg_list.append("--reset")
# handle non-trivial cases the way a real merge does
arg_list.append("--aggressive")
# END merge handling
# tmp file created in git home directory to be sure renaming
# works - /tmp/ dirs could be on another device
tmp_index = tempfile.mktemp('','',repo.git_dir)
arg_list.append("--index-output=%s" % tmp_index)
arg_list.extend(treeish)
# move current index out of the way - otherwise the merge may fail
# as it considers existing entries. moving it essentially clears the index.
# Unfortunately there is no 'soft' way to do it.
# The TemporaryFileSwap assures the original file gets put back
index_handler = TemporaryFileSwap(join_path_native(repo.git_dir, 'index'))
try:
repo.git.read_tree(*arg_list, **kwargs)
index = cls(repo, tmp_index)
index.entries # force it to read the file as we will delete the temp-file
del(index_handler) # release as soon as possible
finally:
if os.path.exists(tmp_index):
os.remove(tmp_index)
# END index merge handling
return index
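# Usage sketch (illustrative; revision names are placeholders): a three-way
# merge of a common base, our HEAD and another branch, written elsewhere:
#   merged = IndexFile.from_tree(repo, '<base>', 'HEAD', '<other-branch>')
#   merged.write('/tmp/merged_index')  # supply an alternate path, as documented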
# UTILITIES
def _iter_expand_paths(self, paths):
"""Expand the directories in list of paths to the corresponding paths accordingly,
Note: git will add items multiple times even if a glob overlapped
with manually specified paths or if paths were specified multiple
times - we respect that and do not prune"""
def raise_exc(e):
raise e
r = self.repo.working_tree_dir
rs = r + os.sep
for path in paths:
abs_path = path
if not os.path.isabs(abs_path):
abs_path = os.path.join(r, path)
# END make absolute path
# resolve globs if possible
if '?' in path or '*' in path or '[' in path:
for f in self._iter_expand_paths(glob.glob(abs_path)):
yield f.replace(rs, '')
continue
# END glob handling
try:
for root, dirs, files in os.walk(abs_path, onerror=raise_exc):
for rela_file in files:
# add relative paths only
yield os.path.join(root.replace(rs, ''), rela_file)
# END for each file in subdir
# END for each subdirectory
except OSError:
# was a file or something that could not be iterated
yield path.replace(rs, '')
# END path exception handling
# END for each path
def _write_path_to_stdin(self, proc, filepath, item, fmakeexc, fprogress,
read_from_stdout=True):
"""Write path to proc.stdin and make sure it processes the item, including progress.
:return: stdout string
:param read_from_stdout: if True, proc.stdout will be read after the item
was sent to stdin. In that case, it will return None
:note: There is a bug in git-update-index that prevents it from sending
reports just in time. This is why we have a version that tries to
read stdout and one which doesn't. In fact, the stdout is not
important as the piped-in files are processed anyway and just in time
:note: Newlines are essential here, git's behaviour is somewhat inconsistent
on this depending on the version, hence we try our best to deal with
newlines carefully. Usually the last newline will not be sent, instead
we will close stdin to break the pipe."""
fprogress(filepath, False, item)
rval = None
try:
proc.stdin.write("%s\n" % filepath)
except IOError:
# pipe broke, usually because some error happened
raise fmakeexc()
# END write exception handling
proc.stdin.flush()
if read_from_stdout:
rval = proc.stdout.readline().strip()
fprogress(filepath, True, item)
return rval
def iter_blobs(self, predicate = lambda t: True):
"""
:return: Iterator yielding tuples of Blob objects and stages, tuple(stage, Blob)
:param predicate:
Function(t) returning True if tuple(stage, Blob) should be yielded by the
iterator. A default filter, the BlobFilter, allows you to yield blobs
only if they match a given list of paths. """
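# Usage sketch (illustrative, not part of the original file) - yield only
# stage/blob pairs under a given prefix using the BlobFilter mentioned above:
# for stage, blob in index.iter_blobs(BlobFilter(['lib'])):
#     print stage, blob.path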
for entry in self.entries.itervalues():
# TODO: is it necessary to convert the mode? We did that when adding
# it to the index, right?
mode = stat_mode_to_index_mode(entry.mode)
blob = entry.to_blob(self.repo)
blob.size = entry.size
output = (entry.stage, blob)
if predicate(output):
yield output
# END for each entry
def unmerged_blobs(self):
"""
:return:
Iterator yielding dict(path : list( tuple( stage, Blob, ...))), being
a dictionary associating a path in the index with a list containing
sorted stage/blob pairs
:note:
Blobs that have been removed on one side simply do not exist in the
given stage. I.e. a file removed on the 'other' branch whose entries
are at stage 3 will not have a stage 3 entry.
"""
is_unmerged_blob = lambda t: t[0] != 0
path_map = dict()
for stage, blob in self.iter_blobs(is_unmerged_blob):
path_map.setdefault(blob.path, list()).append((stage, blob))
# END for each unmerged blob
for l in path_map.itervalues():
l.sort()
return path_map
@classmethod
def entry_key(cls, *entry):
return entry_key(*entry)
def resolve_blobs(self, iter_blobs):
"""Resolve the blobs given in blob iterator. This will effectively remove the
index entries of the respective path at all non-null stages and add the given
blob as new stage null blob.
For each path there may only be one blob, otherwise a ValueError will be raised
claiming the path is already at stage 0.
:raise ValueError: if one of the blobs already existed at stage 0
:return: self
:note:
You will have to write the index manually once you are done, i.e.
index.resolve_blobs(blobs).write()
"""
for blob in iter_blobs:
stage_null_key = (blob.path, 0)
if stage_null_key in self.entries:
raise ValueError( "Path %r already exists at stage 0" % blob.path )
# END assert blob is not stage 0 already
# delete all possible stages
for stage in (1, 2, 3):
try:
del( self.entries[(blob.path, stage)])
except KeyError:
pass
# END ignore key errors
# END for each possible stage
self.entries[stage_null_key] = IndexEntry.from_blob(blob)
# END for each blob
return self
def update(self):
"""Reread the contents of our index file, discarding all cached information
we might have.
:note: This is a possibly dangerous operation as it will discard your changes
to index.entries
:return: self"""
self._delete_entries_cache()
# allows to lazily reread on demand
return self
def write_tree(self):
"""Writes this index to a corresponding Tree object into the repository's
object database and return it.
:return: Tree object representing this index
:note: The tree will be written even if one or more objects the tree refers to
do not yet exist in the object database. This could happen if you added
Entries to the index directly.
:raise ValueError: if there are no entries in the cache
:raise UnmergedEntriesError: """
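# Typical follow-up (a sketch, not from the original source;
# parent_commits is a hypothetical list of parent Commit objects):
# tree = index.write_tree()
# commit = Commit.create_from_tree(index.repo, tree, "message", parent_commits)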
# we obtain no lock as we just flush our contents to disk as tree
# If we are a new index, the entries access will load our data accordingly
mdb = MemoryDB()
entries = self._entries_sorted()
binsha, tree_items = write_tree_from_cache(entries, mdb, slice(0, len(entries)))
# copy changed trees only
mdb.stream_copy(mdb.sha_iter(), self.repo.odb)
# note: additional deserialization could be saved if write_tree_from_cache
# would return sorted tree entries
root_tree = Tree(self.repo, binsha, path='')
root_tree._cache = tree_items
return root_tree
def _process_diff_args(self, args):
try:
args.pop(args.index(self))
except IndexError:
pass
# END remove self
return args
def _to_relative_path(self, path):
""":return: Version of path relative to our git directory or raise ValueError
if it is not within our git direcotory"""
if not os.path.isabs(path):
return path
relative_path = path.replace(self.repo.working_tree_dir+os.sep, "")
if relative_path == path:
raise ValueError("Absolute path %r is not in git repository at %r" % (path,self.repo.working_tree_dir))
return relative_path
def _preprocess_add_items(self, items):
""" Split the items into two lists of path strings and BaseEntries. """
paths = list()
entries = list()
for item in items:
if isinstance(item, basestring):
paths.append(self._to_relative_path(item))
elif isinstance(item, (Blob, Submodule)):
entries.append(BaseIndexEntry.from_blob(item))
elif isinstance(item, BaseIndexEntry):
entries.append(item)
else:
raise TypeError("Invalid Type: %r" % item)
# END for each item
return (paths, entries)
@git_working_dir
def add(self, items, force=True, fprogress=lambda *args: None, path_rewriter=None,
write=True):
"""Add files from the working tree, specific blobs or BaseIndexEntries
to the index.
:param items:
Multiple types of items are supported, types can be mixed within one call.
Different types imply a different handling. File paths may generally be
relative or absolute.
- path string
strings denote a relative or absolute path into the repository pointing to
an existing file, i.e. CHANGES, lib/myfile.ext, '/home/gitrepo/lib/myfile.ext'.
Paths provided like this must exist. When added, they will be written
into the object database.
PathStrings may contain globs, such as 'lib/__init__*', or can be directories
like 'lib', the latter ones will add all the files within the directory and
subdirectories.
This equals a straight git-add.
They are added at stage 0
- Blob or Submodule object
Blobs are added as they are, assuming a valid mode is set.
The file they refer to may or may not exist in the file system, but
must be a path relative to our repository.
If their sha is null ( 40*0 ), their path must exist in the file system
relative to the git repository as an object will be created from
the data at the path.
The handling now very much equals the way string paths are processed, except that
the mode you have set will be kept. This allows you to create symlinks
by setting the mode accordingly and writing the target of the symlink
directly into the file. This equals a default Linux-Symlink which
is not dereferenced automatically, except that it can also be created on
filesystems that do not support symlinks.
Please note that globs or directories are not allowed in Blob objects.
They are added at stage 0
- BaseIndexEntry or type
Handling equals the one of Blob objects, but the stage may be
explicitly set. Please note that Index Entries require binary sha's.
:param force:
**CURRENTLY INEFFECTIVE**
If True, otherwise ignored or excluded files will be
added anyway.
As opposed to the git-add command, we enable this flag by default
as the API user usually wants the item to be added even though
they might be excluded.
:param fprogress:
Function with signature f(path, done=False, item=item) called for each
path to be added, once when it is about to be added (done==False)
and once after it was added (done==True).
item is set to the actual item we handle, either a Path or a BaseIndexEntry
Please note that the processed path is not guaranteed to be present
in the index already as the index is currently being processed.
:param path_rewriter:
Function with signature (string) func(BaseIndexEntry) function returning a path
for each passed entry which is the path to be actually recorded for the
object created from entry.path. This allows you to write an index which
is not identical to the layout of the actual files on your hard-disk.
If not None and ``items`` contain plain paths, these paths will be
converted to Entries beforehand and passed to the path_rewriter.
Please note that entry.path is relative to the git repository.
:param write:
If True, the index will be written once it was altered. Otherwise
the changes only exist in memory and are not available to git commands.
:return:
List(BaseIndexEntries) representing the entries just actually added.
:raise OSError:
if a supplied Path did not exist. Please note that BaseIndexEntry
Objects that do not have a null sha will be added even if their paths
do not exist.
"""
# sort the entries into strings and Entries, Blobs are converted to entries
# automatically
# paths can be git-added, for everything else we use git-update-index
entries_added = list()
paths, entries = self._preprocess_add_items(items)
if paths and path_rewriter:
for path in paths:
abspath = os.path.abspath(path)
gitrelative_path = abspath[len(self.repo.working_tree_dir)+1:]
blob = Blob(self.repo, Blob.NULL_BIN_SHA,
stat_mode_to_index_mode(os.stat(abspath).st_mode),
to_native_path_linux(gitrelative_path))
entries.append(BaseIndexEntry.from_blob(blob))
# END for each path
del(paths[:])
# END rewrite paths
def store_path(filepath):
"""Store file at filepath in the database and return the base index entry"""
st = os.lstat(filepath) # handles non-symlinks as well
stream = None
if S_ISLNK(st.st_mode):
stream = StringIO(os.readlink(filepath))
else:
stream = open(filepath, 'rb')
# END handle stream
fprogress(filepath, False, filepath)
istream = self.repo.odb.store(IStream(Blob.type, st.st_size, stream))
fprogress(filepath, True, filepath)
return BaseIndexEntry((stat_mode_to_index_mode(st.st_mode),
istream.binsha, 0, to_native_path_linux(filepath)))
# END utility method
# HANDLE PATHS
if paths:
assert len(entries_added) == 0
added_files = list()
for filepath in self._iter_expand_paths(paths):
entries_added.append(store_path(filepath))
# END for each filepath
# END path handling
# HANDLE ENTRIES
if entries:
null_mode_entries = [ e for e in entries if e.mode == 0 ]
if null_mode_entries:
raise ValueError("At least one Entry has a null-mode - please use index.remove to remove files for clarity")
# END null mode should be removed
# HANDLE ENTRY OBJECT CREATION
# create objects if required, otherwise go with the existing shas
null_entries_indices = [ i for i,e in enumerate(entries) if e.binsha == Object.NULL_BIN_SHA ]
if null_entries_indices:
for ei in null_entries_indices:
null_entry = entries[ei]
new_entry = store_path(null_entry.path)
# update null entry
entries[ei] = BaseIndexEntry((null_entry.mode, new_entry.binsha, null_entry.stage, null_entry.path))
# END for each entry index
# END null_entry handling
# REWRITE PATHS
# If we have to rewrite the entries, do so now, after we have generated
# all object sha's
if path_rewriter:
for i,e in enumerate(entries):
entries[i] = BaseIndexEntry((e.mode, e.binsha, e.stage, path_rewriter(e)))
# END for each entry
# END handle path rewriting
# just go through the remaining entries and provide progress info
for i, entry in enumerate(entries):
progress_sent = i in null_entries_indices
if not progress_sent:
fprogress(entry.path, False, entry)
fprogress(entry.path, True, entry)
# END handle progress
# END for each entry
entries_added.extend(entries)
# END if there are base entries
# FINALIZE
# add the new entries to this instance
for entry in entries_added:
self.entries[(entry.path, 0)] = IndexEntry.from_base(entry)
if write:
self.write()
# END handle write
return entries_added
def _items_to_rela_paths(self, items):
"""Returns a list of repo-relative paths from the given items which
may be absolute or relative paths, entries or blobs"""
paths = list()
for item in items:
if isinstance(item, (BaseIndexEntry, Blob, Submodule)):
paths.append(self._to_relative_path(item.path))
elif isinstance(item, basestring):
paths.append(self._to_relative_path(item))
else:
raise TypeError("Invalid item type: %r" % item)
# END for each item
return paths
@post_clear_cache
@default_index
def remove(self, items, working_tree=False, **kwargs):
"""Remove the given items from the index and optionally from
the working tree as well.
:param items:
Multiple types of items are supported which may be freely mixed.
- path string
Remove the given path at all stages. If it is a directory, you must
specify the r=True keyword argument to remove all file entries
below it. If absolute paths are given, they will be converted
to a path relative to the git repository directory containing
the working tree
The path string may include globs, such as *.c.
- Blob Object
Only the path portion is used in this case.
- BaseIndexEntry or compatible type
The only relevant information here is the path. The stage is ignored.
:param working_tree:
If True, the entry will also be removed from the working tree, physically
removing the respective file. This may fail if there are uncommitted changes
in it.
:param kwargs:
Additional keyword arguments to be passed to git-rm, such
as 'r' to allow recursive removal of directories
:return:
List(path_string, ...) list of repository relative paths that have
been removed effectively.
This is interesting to know in case you have provided a directory or
globs. Paths are relative to the repository. """
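# Hedged example - recursively remove a directory from the index and the
# working tree (the 'docs/' path is illustrative):
# removed = repo.index.remove(['docs/'], working_tree=True, r=True)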
args = list()
if not working_tree:
args.append("--cached")
args.append("--")
# preprocess paths
paths = self._items_to_rela_paths(items)
removed_paths = self.repo.git.rm(args, paths, **kwargs).splitlines()
# process output to gain proper paths
# rm 'path'
return [ p[4:-1] for p in removed_paths ]
@post_clear_cache
@default_index
def move(self, items, skip_errors=False, **kwargs):
"""Rename/move the items, whereas the last item is considered the destination of
the move operation. If the destination is a file, the first item ( of two )
must be a file as well. If the destination is a directory, it may be preceded
by one or more directories or files.
The working tree will be affected in non-bare repositories.
:param items:
Multiple types of items are supported, please see the 'remove' method
for reference.
:param skip_errors:
If True, errors such as ones resulting from missing source files will
be skipped.
:param kwargs:
Additional arguments you would like to pass to git-mv, such as dry_run
or force.
:return:List(tuple(source_path_string, destination_path_string), ...)
A list of pairs, containing the source file moved as well as its
actual destination. Relative to the repository root.
:raise ValueError: If only one item was given
:raise GitCommandError: If git could not handle your request"""
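# Hedged example (paths illustrative) - preview the rename with a dry run,
# then perform it:
# pairs = repo.index.move(['old.py', 'new.py'], dry_run=True)
# repo.index.move(['old.py', 'new.py'])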
args = list()
if skip_errors:
args.append('-k')
paths = self._items_to_rela_paths(items)
if len(paths) < 2:
raise ValueError("Please provide at least one source and one destination of the move operation")
was_dry_run = kwargs.pop('dry_run', kwargs.pop('n', None))
kwargs['dry_run'] = True
# first execute rename in dryrun so the command tells us what it actually does
# ( for later output )
out = list()
mvlines = self.repo.git.mv(args, paths, **kwargs).splitlines()
# parse result - first 0:n/2 lines are 'checking ', the remaining ones
# are the 'renaming' ones which we parse
for ln in xrange(len(mvlines)/2, len(mvlines)):
tokens = mvlines[ln].split(' to ')
assert len(tokens) == 2, "Too many tokens in %s" % mvlines[ln]
# [0] = Renaming x
# [1] = y
out.append((tokens[0][9:], tokens[1]))
# END for each line to parse
# either prepare for the real run, or output the dry-run result
if was_dry_run:
return out
# END handle dryrun
# now apply the actual operation
kwargs.pop('dry_run')
self.repo.git.mv(args, paths, **kwargs)
return out
def commit(self, message, parent_commits=None, head=True):
"""Commit the current default index file, creating a commit object.
For more information on the arguments, see tree.commit.
:note:
If you have manually altered the .entries member of this instance,
don't forget to write() your changes to disk beforehand.
:return:
Commit object representing the new commit"""
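# Usage sketch (illustrative): stage a change, then commit it:
# repo.index.add(['CHANGES'])
# new_commit = repo.index.commit("update CHANGES")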
tree = self.write_tree()
return Commit.create_from_tree(self.repo, tree, message, parent_commits, head)
@classmethod
def _flush_stdin_and_wait(cls, proc, ignore_stdout = False):
proc.stdin.flush()
proc.stdin.close()
stdout = ''
if not ignore_stdout:
stdout = proc.stdout.read()
proc.stdout.close()
proc.wait()
return stdout
@default_index
def checkout(self, paths=None, force=False, fprogress=lambda *args: None, **kwargs):
"""Checkout the given paths or all files from the version known to the index into
the working tree.
:note: Be sure you have written pending changes using the ``write`` method
in case you have altered the entries dictionary directly
:param paths:
If None, all paths in the index will be checked out. Otherwise an iterable
of relative or absolute paths or a single path pointing to files or directories
in the index is expected.
:param force:
If True, existing files will be overwritten even if they contain local modifications.
If False, these will trigger a CheckoutError.
:param fprogress:
see Index.add_ for signature and explanation.
The provided progress information will contain None as path and item if no
explicit paths are given. Otherwise progress information will be sent
prior and after a file has been checked out
:param kwargs:
Additional arguments to be passed to git-checkout-index
:return:
iterable yielding paths to files which have been checked out and are
guaranteed to match the version stored in the index
:raise CheckoutError:
If at least one file failed to be checked out. This is a summary,
hence it will checkout as many files as it can anyway.
If one of the files or directories does not exist in the index
( as opposed to the original git command, which ignores them ).
Raise GitCommandError if error lines could not be parsed - this truly is
an exceptional state
.. note:: The checkout is limited to checking out the files in the
index. Files which are not in the index anymore and exist in
the working tree will not be deleted. This behaviour is fundamentally
different to *head.checkout*, i.e. if you want git-checkout like behaviour,
use head.checkout instead of index.checkout.
"""
args = ["--index"]
if force:
args.append("--force")
def handle_stderr(proc, iter_checked_out_files):
stderr = proc.stderr.read()
if not stderr:
return
# line contents:
# git-checkout-index: this already exists
failed_files = list()
failed_reasons = list()
unknown_lines = list()
endings = (' already exists', ' is not in the cache', ' does not exist at stage', ' is unmerged')
for line in stderr.splitlines():
if not line.startswith("git checkout-index: ") and not line.startswith("git-checkout-index: "):
is_a_dir = " is a directory"
unlink_issue = "unable to unlink old '"
already_exists_issue = ' already exists, no checkout' # created by entry.c:checkout_entry(...)
if line.endswith(is_a_dir):
failed_files.append(line[:-len(is_a_dir)])
failed_reasons.append(is_a_dir)
elif line.startswith(unlink_issue):
failed_files.append(line[len(unlink_issue):line.rfind("'")])
failed_reasons.append(unlink_issue)
elif line.endswith(already_exists_issue):
failed_files.append(line[:-len(already_exists_issue)])
failed_reasons.append(already_exists_issue)
else:
unknown_lines.append(line)
continue
# END special lines parsing
for e in endings:
if line.endswith(e):
failed_files.append(line[20:-len(e)])
failed_reasons.append(e)
break
# END if ending matches
# END for each possible ending
# END for each line
if unknown_lines:
raise GitCommandError(("git-checkout-index", ), 128, stderr)
if failed_files:
valid_files = list(set(iter_checked_out_files) - set(failed_files))
raise CheckoutError("Some files could not be checked out from the index due to local modifications", failed_files, valid_files, failed_reasons)
# END stderr handler
if paths is None:
args.append("--all")
kwargs['as_process'] = 1
fprogress(None, False, None)
proc = self.repo.git.checkout_index(*args, **kwargs)
proc.wait()
fprogress(None, True, None)
rval_iter = ( e.path for e in self.entries.itervalues() )
handle_stderr(proc, rval_iter)
return rval_iter
else:
if isinstance(paths, basestring):
paths = [paths]
# make sure we have our entries loaded before we start checkout_index
# which will hold a lock on it. We try to get the lock as well during
# our entries initialization
self.entries
args.append("--stdin")
kwargs['as_process'] = True
kwargs['istream'] = subprocess.PIPE
proc = self.repo.git.checkout_index(args, **kwargs)
make_exc = lambda : GitCommandError(("git-checkout-index",)+tuple(args), 128, proc.stderr.read())
checked_out_files = list()
for path in paths:
co_path = to_native_path_linux(self._to_relative_path(path))
# if the item is not in the index, it could be a directory
path_is_directory = False
try:
self.entries[(co_path, 0)]
except KeyError:
dir = co_path
if not dir.endswith('/'):
dir += '/'
for entry in self.entries.itervalues():
if entry.path.startswith(dir):
p = entry.path
self._write_path_to_stdin(proc, p, p, make_exc,
fprogress, read_from_stdout=False)
checked_out_files.append(p)
path_is_directory = True
# END if entry is in directory
# END for each entry
# END path exception handling
if not path_is_directory:
self._write_path_to_stdin(proc, co_path, path, make_exc,
fprogress, read_from_stdout=False)
checked_out_files.append(co_path)
# END path is a file
# END for each path
self._flush_stdin_and_wait(proc, ignore_stdout=True)
handle_stderr(proc, checked_out_files)
return checked_out_files
# END paths handling
assert "Should not reach this point"
@default_index
def reset(self, commit='HEAD', working_tree=False, paths=None, head=False, **kwargs):
"""Reset the index to reflect the tree at the given commit. This will not
adjust our HEAD reference as opposed to HEAD.reset by default.
:param commit:
Revision, Reference or Commit specifying the commit we should represent.
If you want to specify a tree only, use IndexFile.from_tree and overwrite
the default index.
:param working_tree:
If True, the files in the working tree will reflect the changed index.
If False, the working tree will not be touched.
Please note that changes to the working copy will be discarded without
warning!
:param head:
If True, the head will be set to the given commit. This is False by default,
but if True, this method behaves like HEAD.reset.
:param paths: if given as an iterable of absolute or repository-relative paths,
only these will be reset to their state at the given commit-ish.
The paths need to exist at the commit, otherwise an exception will be
raised.
:param kwargs:
Additional keyword arguments passed to git-reset
.. note:: IndexFile.reset, as opposed to HEAD.reset, will not delete any files
in order to maintain a consistent working tree. Instead, it will just
checkout the files according to their state in the index.
If you want git-reset like behaviour, use *HEAD.reset* instead.
:return: self """
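# Hedged usage sketch - make the index and working tree match the parent
# commit without moving HEAD:
# repo.index.reset('HEAD~1', working_tree=True)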
# what we actually want to do is to merge the tree into our existing
# index, which is what git-read-tree does
new_inst = type(self).from_tree(self.repo, commit)
if not paths:
self.entries = new_inst.entries
else:
nie = new_inst.entries
for path in paths:
path = self._to_relative_path(path)
try:
key = entry_key(path, 0)
self.entries[key] = nie[key]
except KeyError:
# if key is not in theirs, it mustn't be in ours
try:
del(self.entries[key])
except KeyError:
pass
# END handle deletion keyerror
# END handle keyerror
# END for each path
# END handle paths
self.write()
if working_tree:
self.checkout(paths=paths, force=True)
# END handle working tree
if head:
self.repo.head.set_commit(self.repo.commit(commit), logmsg="%s: Updating HEAD" % commit)
# END handle head change
return self
@default_index
def diff(self, other=diff.Diffable.Index, paths=None, create_patch=False, **kwargs):
"""Diff this index against the working copy or a Tree or Commit object
For a documentation of the parameters and return values, see
Diffable.diff
:note:
Will only work with indices that represent the default git index as
they have not been initialized with a stream.
"""
# index against index is always empty
if other is self.Index:
return diff.DiffIndex()
# index against anything but None is a reverse diff with the respective
# item. Handle existing -R flags properly. Transform strings to the object
# so that we can call diff on it
if isinstance(other, basestring):
other = self.repo.rev_parse(other)
# END object conversion
if isinstance(other, Object):
# invert the existing R flag
cur_val = kwargs.get('R', False)
kwargs['R'] = not cur_val
return other.diff(self.Index, paths, create_patch, **kwargs)
# END diff against other item handling
# if other is not None here, something is wrong
if other is not None:
raise ValueError( "other must be None, Diffable.Index, a Tree or Commit, was %r" % other )
# diff against working copy - can be handled by superclass natively
return super(IndexFile, self).diff(other, paths, create_patch, **kwargs)
| bsd-3-clause |
BrotherPhil/django | tests/gis_tests/maps/tests.py | 322 | 2099 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.geos import HAS_GEOS
from django.test import SimpleTestCase
from django.test.utils import modify_settings, override_settings
from django.utils.encoding import force_text
GOOGLE_MAPS_API_KEY = 'XXXX'
@skipUnless(HAS_GEOS, 'Geos is required.')
@modify_settings(
INSTALLED_APPS={'append': 'django.contrib.gis'},
)
class GoogleMapsTest(SimpleTestCase):
@override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
def test_google_map_scripts(self):
"""
Testing GoogleMap.scripts() output. See #20773.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap
google_map = GoogleMap()
scripts = google_map.scripts
self.assertIn(GOOGLE_MAPS_API_KEY, scripts)
self.assertIn("new GMap2", scripts)
@override_settings(GOOGLE_MAPS_API_KEY=GOOGLE_MAPS_API_KEY)
def test_unicode_in_google_maps(self):
"""
Test that GoogleMap doesn't crash with non-ASCII content.
"""
from django.contrib.gis.geos import Point
from django.contrib.gis.maps.google.gmap import GoogleMap, GMarker
center = Point(6.146805, 46.227574)
marker = GMarker(center,
title='En français !')
google_map = GoogleMap(center=center, zoom=18, markers=[marker])
self.assertIn("En français", google_map.scripts)
def test_gevent_html_safe(self):
from django.contrib.gis.maps.google.overlays import GEvent
event = GEvent('click', 'function() {location.href = "http://www.google.com"}')
self.assertTrue(hasattr(GEvent, '__html__'))
self.assertEqual(force_text(event), event.__html__())
def test_goverlay_html_safe(self):
from django.contrib.gis.maps.google.overlays import GOverlayBase
overlay = GOverlayBase()
overlay.js_params = '"foo", "bar"'
self.assertTrue(hasattr(GOverlayBase, '__html__'))
self.assertEqual(force_text(overlay), overlay.__html__())
| bsd-3-clause |
zeroblade1984/LG_MSM8974 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
charleswhchan/ansible | lib/ansible/plugins/inventory/__init__.py | 50 | 2727 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABCMeta, abstractmethod
from ansible.compat.six import with_metaclass
class InventoryParser(with_metaclass(ABCMeta, object)):
'''Abstract Base Class for retrieving inventory information
Any InventoryParser functions by taking an inven_source. The caller then
calls the parse() method. Once parse is called, the caller can access
InventoryParser.hosts for a mapping of Host objects and
InventoryParser.groups for a mapping of Group objects.
'''
def __init__(self, inven_source):
'''
InventoryParser constructors take a source of inventory information
that they will parse the host and group information from.
'''
self.inven_source = inven_source
self.reset_parser()
@abstractmethod
def reset_parser(self):
'''
InventoryParsers generally cache their data once parser() is
called. This method initializes any parser state before calling parser
again.
'''
self.hosts = dict()
self.groups = dict()
self.parsed = False
def _merge(self, target, addition):
'''
This method is provided to InventoryParsers to merge host or group
dicts since it may take several passes to get all of the data
Example usage:
self.hosts = self.from_ini(filename)
new_hosts = self.from_script(scriptname)
self._merge(self.hosts, new_hosts)
'''
for i in addition:
if i in target:
target[i].merge(addition[i])
else:
target[i] = addition[i]
@abstractmethod
def parse(self, refresh=False):
if refresh:
self.reset_parser()
if self.parsed:
return self.parsed
# Parse self.inven_sources here
pass
| gpl-3.0 |
suto/infernal-twin | build/pip/build/lib.linux-i686-2.7/pip/_vendor/requests/packages/chardet/langcyrillicmodel.py | 2762 | 17725 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# KOI8-R language model
# Character Mapping Table:
KOI8R_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90
223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0
238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0
27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0
15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0
59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0
35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0
)
win1251_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
)
latin5_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
macCyrillic_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255,
)
IBM855_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205,
206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70,
3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219,
220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229,
230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243,
8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248,
43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249,
250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255,
)
IBM866_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40
155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50
253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60
67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70
37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35,
45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43,
3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15,
191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,
207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,
223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,
9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16,
239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 97.6601%
# first 1024 sequences: 2.3389%
# rest sequences: 0.1237%
# negative sequences: 0.0009%
RussianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1,
0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0,
0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1,
1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1,
1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0,
2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1,
1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0,
3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1,
1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0,
2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2,
1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1,
1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1,
1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1,
1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0,
3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2,
1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1,
2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1,
1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0,
2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0,
0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1,
1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0,
1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1,
1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0,
3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1,
3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1,
1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1,
1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1,
0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1,
1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0,
1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1,
0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1,
1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2,
2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1,
1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0,
1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0,
2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,
1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,
2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1,
1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1,
1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1,
0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,
0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,
0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,
2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0,
0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
)
Koi8rModel = {
'charToOrderMap': KOI8R_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "KOI8-R"
}
Win1251CyrillicModel = {
'charToOrderMap': win1251_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
Latin5CyrillicModel = {
'charToOrderMap': latin5_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
MacCyrillicModel = {
'charToOrderMap': macCyrillic_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "MacCyrillic"
}
Ibm866Model = {
'charToOrderMap': IBM866_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM866"
}
Ibm855Model = {
'charToOrderMap': IBM855_CharToOrderMap,
'precedenceMatrix': RussianLangModel,
'mTypicalPositiveRatio': 0.976601,
'keepEnglishLetter': False,
'charsetName': "IBM855"
}
# flake8: noqa
| gpl-3.0 |
hvy/chainer | chainer/types.py | 4 | 2253 | import numbers
import typing as tp # NOQA
import typing_extensions as tpe # NOQA
try:
from typing import TYPE_CHECKING # NOQA
except ImportError:
# typing.TYPE_CHECKING doesn't exist before Python 3.5.2
TYPE_CHECKING = False
# import chainer modules only for type checkers to avoid circular import
if TYPE_CHECKING:
from types import ModuleType # NOQA
import numpy # NOQA
from chainer import backend # NOQA
from chainer.backends import cuda, intel64 # NOQA
from chainer import initializer # NOQA
import chainerx # NOQA
Shape = tp.Tuple[int, ...]
ShapeSpec = tp.Union[int, tp.Sequence[int]] # Sequence includes Tuple[int, ...] # NOQA
DTypeSpec = tp.Union[tp.Any] # TODO(okapies): encode numpy.dtype
NdArray = tp.Union[
'numpy.ndarray',
'cuda.ndarray',
# 'intel64.mdarray',
# TODO(okapies): mdarray is partially incompatible with other ndarrays
'chainerx.ndarray',
]
"""The ndarray types supported in :func:`chainer.get_array_types`
"""
Xp = tp.Union[tp.Any] # TODO(okapies): encode numpy/cupy/ideep/chainerx
class AbstractInitializer(tpe.Protocol):
"""Protocol class for Initializer.
It can be either an :class:`chainer.Initializer` or a callable object
that takes an ndarray.
This is only for PEP 544 compliant static type checkers.
"""
dtype = None # type: tp.Optional[DTypeSpec]
def __call__(self, array: NdArray) -> None:
pass
ScalarValue = tp.Union[
'numpy.generic',
bytes,
str,
memoryview,
numbers.Number,
]
"""The scalar types supported in :func:`numpy.isscalar`.
"""
InitializerSpec = tp.Union[AbstractInitializer, ScalarValue, 'numpy.ndarray']
DeviceSpec = tp.Union[
'backend.Device',
'chainerx.Device',
'cuda.Device',
str,
tp.Tuple[str, int],
'ModuleType', # numpy and intel64 module
tp.Tuple['ModuleType', int], # cupy module and device ID
]
"""The device specifier types supported in :func:`chainer.get_device`
"""
# TODO(okapies): Use Xp instead of ModuleType
CudaDeviceSpec = tp.Union['cuda.Device', int, 'numpy.integer'] # NOQA
"""
This type is only for the deprecated :func:`chainer.cuda.get_device` API.
Use :class:`~chainer.types.DeviceSpec` instead.
"""
| mit |
yongshengwang/hue | desktop/core/ext-py/Django-1.6.10/tests/utils_tests/test_dateformat.py | 57 | 6340 | from __future__ import unicode_literals
from datetime import datetime, date
import os
import time
from django.utils.dateformat import format
from django.utils import dateformat, translation, unittest
from django.utils.timezone import utc
from django.utils.tzinfo import FixedOffset, LocalTimezone
class DateFormatTests(unittest.TestCase):
def setUp(self):
self.old_TZ = os.environ.get('TZ')
os.environ['TZ'] = 'Europe/Copenhagen'
self._orig_lang = translation.get_language()
translation.activate('en-us')
try:
# Check if a timezone has been set
time.tzset()
self.tz_tests = True
except AttributeError:
# No timezone available. Don't run the tests that require a TZ
self.tz_tests = False
def tearDown(self):
translation.activate(self._orig_lang)
if self.old_TZ is None:
del os.environ['TZ']
else:
os.environ['TZ'] = self.old_TZ
# Cleanup - force re-evaluation of TZ environment variable.
if self.tz_tests:
time.tzset()
def test_date(self):
d = date(2009, 5, 16)
self.assertEqual(date.fromtimestamp(int(format(d, 'U'))), d)
def test_naive_datetime(self):
dt = datetime(2009, 5, 16, 5, 30, 30)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt)
def test_datetime_with_local_tzinfo(self):
ltz = LocalTimezone(datetime.now())
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=ltz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.replace(tzinfo=None))
def test_datetime_with_tzinfo(self):
tz = FixedOffset(-510)
ltz = LocalTimezone(datetime.now())
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.astimezone(ltz).replace(tzinfo=None))
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz).utctimetuple(), dt.utctimetuple())
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz).utctimetuple(), dt.utctimetuple())
def test_epoch(self):
udt = datetime(1970, 1, 1, tzinfo=utc)
self.assertEqual(format(udt, 'U'), '0')
def test_empty_format(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, ''), '')
def test_am_pm(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, 'a'), 'p.m.')
def test_microsecond(self):
# Regression test for #18951
dt = datetime(2009, 5, 16, microsecond=123)
self.assertEqual(dateformat.format(dt, 'u'), '000123')
def test_date_formats(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
self.assertEqual(dateformat.format(my_birthday, 'A'), 'PM')
self.assertEqual(dateformat.format(timestamp, 'c'), '2008-05-19T11:45:23.123456')
self.assertEqual(dateformat.format(my_birthday, 'd'), '08')
self.assertEqual(dateformat.format(my_birthday, 'j'), '8')
self.assertEqual(dateformat.format(my_birthday, 'l'), 'Sunday')
self.assertEqual(dateformat.format(my_birthday, 'L'), 'False')
self.assertEqual(dateformat.format(my_birthday, 'm'), '07')
self.assertEqual(dateformat.format(my_birthday, 'M'), 'Jul')
self.assertEqual(dateformat.format(my_birthday, 'b'), 'jul')
self.assertEqual(dateformat.format(my_birthday, 'n'), '7')
self.assertEqual(dateformat.format(my_birthday, 'N'), 'July')
def test_time_formats(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, 'P'), '10 p.m.')
self.assertEqual(dateformat.format(my_birthday, 's'), '00')
self.assertEqual(dateformat.format(my_birthday, 'S'), 'th')
self.assertEqual(dateformat.format(my_birthday, 't'), '31')
self.assertEqual(dateformat.format(my_birthday, 'w'), '0')
self.assertEqual(dateformat.format(my_birthday, 'W'), '27')
self.assertEqual(dateformat.format(my_birthday, 'y'), '79')
self.assertEqual(dateformat.format(my_birthday, 'Y'), '1979')
self.assertEqual(dateformat.format(my_birthday, 'z'), '189')
def test_dateformat(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, r'Y z \C\E\T'), '1979 189 CET')
self.assertEqual(dateformat.format(my_birthday, r'jS \o\f F'), '8th of July')
def test_futuredates(self):
the_future = datetime(2100, 10, 25, 0, 00)
self.assertEqual(dateformat.format(the_future, r'Y'), '2100')
def test_timezones(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
summertime = datetime(2005, 10, 30, 1, 00)
wintertime = datetime(2005, 10, 30, 4, 00)
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
if self.tz_tests:
self.assertEqual(dateformat.format(my_birthday, 'O'), '+0100')
self.assertEqual(dateformat.format(my_birthday, 'r'), 'Sun, 8 Jul 1979 22:00:00 +0100')
self.assertEqual(dateformat.format(my_birthday, 'T'), 'CET')
self.assertEqual(dateformat.format(my_birthday, 'U'), '300315600')
self.assertEqual(dateformat.format(timestamp, 'u'), '123456')
self.assertEqual(dateformat.format(my_birthday, 'Z'), '3600')
self.assertEqual(dateformat.format(summertime, 'I'), '1')
self.assertEqual(dateformat.format(summertime, 'O'), '+0200')
self.assertEqual(dateformat.format(wintertime, 'I'), '0')
self.assertEqual(dateformat.format(wintertime, 'O'), '+0100')
# Ticket #16924 -- We don't need timezone support to test this
# 3h30m to the west of UTC
tz = FixedOffset(-3*60 - 30)
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
self.assertEqual(dateformat.format(dt, 'O'), '-0330')
| apache-2.0 |
channing/gyp | test/make/gyptest-noload.py | 362 | 2023 | #!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests the use of the NO_LOAD flag which makes loading sub .mk files
optional.
"""
# Python 2.5 needs this for the with statement.
from __future__ import with_statement
import os
import TestGyp
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('all.gyp', chdir='noload')
test.relocate('noload', 'relocate/noload')
test.build('build/all.gyp', test.ALL, chdir='relocate/noload')
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
# Just sanity test that NO_LOAD=lib doesn't break anything.
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=z'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
# Make sure we can rebuild without reloading the sub .mk file.
with open('relocate/noload/main.c', 'a') as src_file:
src_file.write("\n")
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
# Change shared.c, but verify that it doesn't get rebuilt if we don't load it.
with open('relocate/noload/lib/shared.c', 'w') as shared_file:
shared_file.write(
'#include "shared.h"\n'
'const char kSharedStr[] = "modified";\n'
)
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
test.pass_test()
| bsd-3-clause |
atmark-techno/atmark-dist | user/python/Demo/sgi/video/VFile.py | 3 | 30270 | # Classes to read and write CMIF video files.
# (For a description of the CMIF video format, see cmif-file.ms.)
# Layers of functionality:
#
# VideoParams: maintain essential parameters of a video file
# Displayer: display a frame in a window (with some extra parameters)
# BasicVinFile: read a CMIF video file
# BasicVoutFile: write a CMIF video file
# VinFile: BasicVinFile + Displayer
# VoutFile: BasicVoutFile + Displayer
#
# XXX Future extension:
# BasicVinoutFile: supports overwriting of individual frames
# Imported modules
import sys
try:
import gl
import GL
import GET
no_gl = 0
except ImportError:
no_gl = 1
import colorsys
import imageop
# Exception raised for various occasions
Error = 'VFile.Error' # file format errors
CallError = 'VFile.CallError' # bad call
AssertError = 'VFile.AssertError' # internal malfunction
# Max nr. of colormap entries to use
MAXMAP = 4096 - 256
# Parametrizations of colormap handling based on color system.
# (These functions are used via eval with a constructed argument!)
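# For example, choose_conversion('hls') below evaluates the name 'conv_hls'
# and returns that function; conv_hls(0.5, 0.0, 1.0) maps lightness 0.5,
# hue 0.0 and saturation 1.0 to pure red, i.e. (1.0, 0.0, 0.0).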
def conv_grey(l, x, y):
return colorsys.yiq_to_rgb(l, 0, 0)
def conv_grey4(l, x, y):
return colorsys.yiq_to_rgb(l*17, 0, 0)
def conv_mono(l, x, y):
return colorsys.yiq_to_rgb(l*255, 0, 0)
def conv_yiq(y, i, q):
return colorsys.yiq_to_rgb(y, (i-0.5)*1.2, q-0.5)
def conv_hls(l, h, s):
return colorsys.hls_to_rgb(h, l, s)
def conv_hsv(v, h, s):
return colorsys.hsv_to_rgb(h, s, v)
def conv_rgb(r, g, b):
raise Error, 'Attempt to make RGB colormap'
def conv_rgb8(rgb, d1, d2):
rgb = int(rgb*255.0)
r = (rgb >> 5) & 0x07
g = (rgb ) & 0x07
b = (rgb >> 3) & 0x03
return (r/7.0, g/7.0, b/3.0)
def conv_jpeg(r, g, b):
raise Error, 'Attempt to make RGB colormap (jpeg)'
conv_jpeggrey = conv_grey
conv_grey2 = conv_grey
# Choose one of the above based upon a color system name
def choose_conversion(format):
try:
return eval('conv_' + format)
except:
raise Error, 'Unknown color system: ' + `format`
# Inverses of the above
def inv_grey(r, g, b):
y, i, q = colorsys.rgb_to_yiq(r, g, b)
return y, 0, 0
def inv_yiq(r, g, b):
y, i, q = colorsys.rgb_to_yiq(r, g, b)
return y, i/1.2 + 0.5, q + 0.5
def inv_hls(r, g, b):
h, l, s = colorsys.rgb_to_hls(r, g, b)
return l, h, s
def inv_hsv(r, g, b):
h, s, v = colorsys.rgb_to_hsv(r, g, b)
return v, h, s
def inv_rgb(r, g, b):
raise Error, 'Attempt to invert RGB colormap'
def inv_rgb8(r, g, b):
r = int(r*7.0)
g = int(g*7.0)
b = int(b*7.0)
rgb = ((r&7) << 5) | ((b&3) << 3) | (g&7)
return rgb / 255.0, 0, 0
def inv_jpeg(r, g, b):
raise Error, 'Attempt to invert RGB colormap (jpeg)'
inv_jpeggrey = inv_grey
# Choose one of the above based upon a color system name
def choose_inverse(format):
try:
return eval('inv_' + format)
except:
raise Error, 'Unknown color system: ' + `format`
# Predicate to see whether this is an entry level (non-XS) Indigo.
# If so we can lrectwrite 8-bit wide pixels into a window in RGB mode
def is_entry_indigo():
# XXX hack, hack. We should call gl.gversion() but that doesn't
# exist in earlier Python versions. Therefore we check the number
# of bitplanes *and* the size of the monitor.
xmax = gl.getgdesc(GL.GD_XPMAX)
if xmax <> 1024: return 0
ymax = gl.getgdesc(GL.GD_YPMAX)
if ymax != 768: return 0
r = gl.getgdesc(GL.GD_BITS_NORM_SNG_RED)
g = gl.getgdesc(GL.GD_BITS_NORM_SNG_GREEN)
b = gl.getgdesc(GL.GD_BITS_NORM_SNG_BLUE)
return (r, g, b) == (3, 3, 2)
# Predicate to see whether this machine supports pixmode(PM_SIZE) with
# values 1 or 4.
#
# XXX Temporarily disabled, since it is unclear which machines support
# XXX which pixelsizes.
#
# XXX The XS appears to support 4 bit pixels, but (looking at osview) it
# XXX seems as if the conversion is done by the kernel (unpacking ourselves
# XXX is faster than using PM_SIZE=4)
def support_packed_pixels():
return 0 # To be architecture-dependent
# Tables listing bits per pixel for some formats
bitsperpixel = { \
'rgb': 32, \
'rgb8': 8, \
'grey': 8, \
'grey4': 4, \
'grey2': 2, \
'mono': 1, \
'compress': 32, \
}
bppafterdecomp = {'jpeg': 32, 'jpeggrey': 8}
# Base class to manage video format parameters
class VideoParams:
# Initialize an instance.
# Set all parameters to something decent
# (except width and height are set to zero)
def __init__(self):
# Essential parameters
self.frozen = 0 # if set, can't change parameters
self.format = 'grey' # color system used
# Choose from: grey, rgb, rgb8, hsv, yiq, hls, jpeg, jpeggrey,
# mono, grey2, grey4
self.width = 0 # width of frame
self.height = 0 # height of frame
self.packfactor = 1, 1 # expansion using rectzoom
# Colormap info
self.c0bits = 8 # bits in first color dimension
self.c1bits = 0 # bits in second color dimension
self.c2bits = 0 # bits in third color dimension
self.offset = 0 # colormap index offset (XXX ???)
self.chrompack = 0 # set if separate chrominance data
self.setderived()
self.decompressor = None
# Freeze the parameters (disallow changes)
def freeze(self):
self.frozen = 1
# Unfreeze the parameters (allow changes)
def unfreeze(self):
self.frozen = 0
# Set some values derived from the standard info values
def setderived(self):
if self.frozen: raise AssertError
if bitsperpixel.has_key(self.format):
self.bpp = bitsperpixel[self.format]
else:
self.bpp = 0
xpf, ypf = self.packfactor
self.xpf = abs(xpf)
self.ypf = abs(ypf)
self.mirror_image = (xpf < 0)
self.upside_down = (ypf < 0)
self.realwidth = self.width / self.xpf
self.realheight = self.height / self.ypf
# Set colormap info
def setcmapinfo(self):
stuff = 0, 0, 0, 0, 0
if self.format in ('rgb8', 'grey'):
stuff = 8, 0, 0, 0, 0
if self.format == 'grey4':
stuff = 4, 0, 0, 0, 0
if self.format == 'grey2':
stuff = 2, 0, 0, 0, 0
if self.format == 'mono':
stuff = 1, 0, 0, 0, 0
self.c0bits, self.c1bits, self.c2bits, \
self.offset, self.chrompack = stuff
# Set the frame width and height (e.g. from gl.getsize())
def setsize(self, width, height):
if self.frozen: raise CallError
width = (width/self.xpf)*self.xpf
height = (height/self.ypf)*self.ypf
self.width, self.height = width, height
self.setderived()
# Retrieve the frame width and height (e.g. for gl.prefsize())
def getsize(self):
return (self.width, self.height)
# Set the format
def setformat(self, format):
if self.frozen: raise CallError
self.format = format
self.setderived()
self.setcmapinfo()
# Get the format
def getformat(self):
return self.format
# Set the packfactor
def setpf(self, pf):
if self.frozen: raise CallError
if type(pf) == type(1):
pf = (pf, pf)
if type(pf) is not type(()) or len(pf) <> 2: raise CallError
self.packfactor = pf
self.setderived()
# Get the packfactor
def getpf(self):
return self.packfactor
# Set all parameters
def setinfo(self, values):
if self.frozen: raise CallError
self.setformat(values[0])
self.setpf(values[3])
self.setsize(values[1], values[2])
(self.c0bits, self.c1bits, self.c2bits, \
self.offset, self.chrompack) = values[4:9]
if self.format == 'compress' and len(values) > 9:
self.compressheader = values[9]
self.setderived()
# Retrieve all parameters in a format suitable for a subsequent
# call to setinfo()
def getinfo(self):
return (self.format, self.width, self.height, self.packfactor,\
self.c0bits, self.c1bits, self.c2bits, self.offset, \
self.chrompack)
def getcompressheader(self):
return self.compressheader
def setcompressheader(self, ch):
self.compressheader = ch
# Write the relevant bits to stdout
def printinfo(self):
print 'Format: ', self.format
print 'Size: ', self.width, 'x', self.height
print 'Pack: ', self.packfactor, '; chrom:', self.chrompack
print 'Bpp: ', self.bpp
print 'Bits: ', self.c0bits, self.c1bits, self.c2bits
print 'Offset: ', self.offset
# Calculate data size, if possible
# (Not counting frame header or cdata size)
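	# For example, a 320x240 'grey' frame (8 bits per pixel) with
	# packfactor (2, 2) covers (320/2)*(240/2) = 19200 pixels, so
	# calcframesize() returns (19200*8 + 7)/8 = 19200 bytes.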
def calcframesize(self):
if not self.bpp: raise CallError
size = self.width/self.xpf * self.height/self.ypf
size = (size * self.bpp + 7) / 8
return size
# Decompress a possibly compressed frame. This method is here
# since you sometimes want to use it on a VFile instance and sometimes
# on a Displayer instance.
#
# XXXX This should also handle jpeg. Actually, the whole mechanism
# should be much more of 'ihave/iwant' style, also allowing you to
# read, say, greyscale images from a color movie.
def decompress(self, data):
if self.format <> 'compress':
return data
if not self.decompressor:
import cl
scheme = cl.QueryScheme(self.compressheader)
self.decompressor = cl.OpenDecompressor(scheme)
headersize = self.decompressor.ReadHeader(self.compressheader)
width = self.decompressor.GetParam(cl.IMAGE_WIDTH)
height = self.decompressor.GetParam(cl.IMAGE_HEIGHT)
params = [cl.ORIGINAL_FORMAT, cl.RGBX, \
cl.ORIENTATION, cl.BOTTOM_UP, \
cl.FRAME_BUFFER_SIZE, width*height*cl.BytesPerPixel(cl.RGBX)]
self.decompressor.SetParams(params)
data = self.decompressor.Decompress(1, data)
return data
# Class to display video frames in a window.
# It is the caller's responsibility to ensure that the correct window
# is current when using showframe(), initcolormap(), clear() and clearto()
class Displayer(VideoParams):
# Initialize an instance.
# This does not need a current window
def __init__(self):
if no_gl:
raise RuntimeError, \
'no gl module available, so cannot display'
VideoParams.__init__(self)
# User-settable parameters
self.magnify = 1.0 # frame magnification factor
self.xorigin = 0 # x frame offset
self.yorigin = 0 # y frame offset (from bottom)
self.quiet = 0 # if set, don't print messages
self.fallback = 1 # allow fallback to grey
# Internal flags
self.colormapinited = 0 # must initialize window
self.skipchrom = 0 # don't skip chrominance data
self.color0 = None # magic, used by clearto()
self.fixcolor0 = 0 # don't need to fix color0
self.mustunpack = (not support_packed_pixels())
# setinfo() must reset some internal flags
def setinfo(self, values):
VideoParams.setinfo(self, values)
self.colormapinited = 0
self.skipchrom = 0
self.color0 = None
self.fixcolor0 = 0
# Show one frame, initializing the window if necessary
def showframe(self, data, chromdata):
self.showpartframe(data, chromdata, \
(0,0,self.width,self.height))
def showpartframe(self, data, chromdata, (x,y,w,h)):
pmsize = self.bpp
xpf, ypf = self.xpf, self.ypf
if self.upside_down:
gl.pixmode(GL.PM_TTOB, 1)
if self.mirror_image:
gl.pixmode(GL.PM_RTOL, 1)
if self.format in ('jpeg', 'jpeggrey'):
import jpeg
data, width, height, bytes = jpeg.decompress(data)
pmsize = bytes*8
elif self.format == 'compress':
data = self.decompress(data)
pmsize = 32
elif self.format in ('mono', 'grey4'):
if self.mustunpack:
if self.format == 'mono':
data = imageop.mono2grey(data, \
w/xpf, h/ypf, 0x20, 0xdf)
elif self.format == 'grey4':
data = imageop.grey42grey(data, \
w/xpf, h/ypf)
pmsize = 8
elif self.format == 'grey2':
data = imageop.grey22grey(data, w/xpf, h/ypf)
pmsize = 8
if not self.colormapinited:
self.initcolormap()
if self.fixcolor0:
gl.mapcolor(self.color0)
self.fixcolor0 = 0
xfactor = yfactor = self.magnify
xfactor = xfactor * xpf
yfactor = yfactor * ypf
if chromdata and not self.skipchrom:
cp = self.chrompack
cx = int(x*xfactor*cp) + self.xorigin
cy = int(y*yfactor*cp) + self.yorigin
cw = (w+cp-1)/cp
ch = (h+cp-1)/cp
gl.rectzoom(xfactor*cp, yfactor*cp)
gl.pixmode(GL.PM_SIZE, 16)
gl.writemask(self.mask - ((1 << self.c0bits) - 1))
gl.lrectwrite(cx, cy, cx + cw - 1, cy + ch - 1, \
chromdata)
#
if pmsize < 32:
gl.writemask((1 << self.c0bits) - 1)
gl.pixmode(GL.PM_SIZE, pmsize)
w = w/xpf
h = h/ypf
x = x/xpf
y = y/ypf
gl.rectzoom(xfactor, yfactor)
x = int(x*xfactor)+self.xorigin
y = int(y*yfactor)+self.yorigin
gl.lrectwrite(x, y, x + w - 1, y + h - 1, data)
gl.gflush()
# Initialize the window: set RGB or colormap mode as required,
# fill in the colormap, and clear the window
def initcolormap(self):
self.colormapinited = 1
self.color0 = None
self.fixcolor0 = 0
if self.format in ('rgb', 'jpeg', 'compress'):
self.set_rgbmode()
gl.RGBcolor(200, 200, 200) # XXX rather light grey
gl.clear()
return
# This only works on an Entry-level Indigo from IRIX 4.0.5
if self.format == 'rgb8' and is_entry_indigo() and \
gl.gversion() == 'GL4DLG-4.0.': # Note trailing '.'!
self.set_rgbmode()
gl.RGBcolor(200, 200, 200) # XXX rather light grey
gl.clear()
gl.pixmode(GL.PM_SIZE, 8)
return
self.set_cmode()
self.skipchrom = 0
if self.offset == 0:
self.mask = 0x7ff
else:
self.mask = 0xfff
if not self.quiet:
sys.stderr.write('Initializing color map...')
self._initcmap()
gl.clear()
if not self.quiet:
sys.stderr.write(' Done.\n')
# Set the window in RGB mode (may be overridden for Glx window)
def set_rgbmode(self):
gl.RGBmode()
gl.gconfig()
# Set the window in colormap mode (may be overridden for Glx window)
def set_cmode(self):
gl.cmode()
gl.gconfig()
# Clear the window to a default color
def clear(self):
if not self.colormapinited: raise CallError
if gl.getdisplaymode() in (GET.DMRGB, GET.DMRGBDOUBLE):
gl.RGBcolor(200, 200, 200) # XXX rather light grey
gl.clear()
return
gl.writemask(0xffffffff)
gl.clear()
# Clear the window to a given RGB color.
# This may steal the first color index used; the next call to
# showframe() will restore the intended mapping for that index
def clearto(self, r, g, b):
if not self.colormapinited: raise CallError
if gl.getdisplaymode() in (GET.DMRGB, GET.DMRGBDOUBLE):
gl.RGBcolor(r, g, b)
gl.clear()
return
index = self.color0[0]
self.fixcolor0 = 1
gl.mapcolor(index, r, g, b)
gl.writemask(0xffffffff)
gl.clear()
gl.gflush()
# Do the hard work for initializing the colormap (internal).
# This also sets the current color to the first color index
# used -- the caller should never change this since it is used
# by clear() and clearto()
def _initcmap(self):
map = []
if self.format in ('mono', 'grey4') and self.mustunpack:
convcolor = conv_grey
else:
convcolor = choose_conversion(self.format)
maxbits = gl.getgdesc(GL.GD_BITS_NORM_SNG_CMODE)
if maxbits > 11:
maxbits = 11
c0bits = self.c0bits
c1bits = self.c1bits
c2bits = self.c2bits
if c0bits+c1bits+c2bits > maxbits:
if self.fallback and c0bits < maxbits:
# Cannot display frames in this mode, use grey
self.skipchrom = 1
c1bits = c2bits = 0
convcolor = choose_conversion('grey')
else:
raise Error, 'Sorry, '+`maxbits`+ \
' bits max on this machine'
maxc0 = 1 << c0bits
maxc1 = 1 << c1bits
maxc2 = 1 << c2bits
if self.offset == 0 and maxbits == 11:
offset = 2048
else:
offset = self.offset
if maxbits <> 11:
offset = offset & ((1<<maxbits)-1)
self.color0 = None
self.fixcolor0 = 0
for c0 in range(maxc0):
c0v = c0/float(maxc0-1)
for c1 in range(maxc1):
if maxc1 == 1:
c1v = 0
else:
c1v = c1/float(maxc1-1)
for c2 in range(maxc2):
if maxc2 == 1:
c2v = 0
else:
c2v = c2/float(maxc2-1)
index = offset + c0 + (c1<<c0bits) + \
(c2 << (c0bits+c1bits))
if index < MAXMAP:
rv, gv, bv = \
convcolor(c0v, c1v, c2v)
r, g, b = int(rv*255.0), \
int(gv*255.0), \
int(bv*255.0)
map.append((index, r, g, b))
if self.color0 == None:
self.color0 = \
index, r, g, b
self.install_colormap(map)
# Permanently make the first color index current
gl.color(self.color0[0])
# Install the colormap in the window (may be overridden for Glx window)
def install_colormap(self, map):
if not self.quiet:
sys.stderr.write(' Installing ' + `len(map)` + \
' entries...')
for irgb in map:
gl.mapcolor(irgb)
gl.gflush() # send the colormap changes to the X server
# Read a CMIF video file header.
# Return (version, values) where version is 0.0, 1.0, 2.0 or 3.[01],
# and values is ready for setinfo().
# Raise Error if there is an error in the info
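# For reference, writefileheader() below emits a version 3.1 header whose
# three lines look like (values are illustrative):
#   CMIF video 3.1
#   ('grey', 8)
#   (320, 240, (2, 2))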
def readfileheader(fp, filename):
#
# Get identifying header
#
line = fp.readline(20)
if line == 'CMIF video 0.0\n':
version = 0.0
elif line == 'CMIF video 1.0\n':
version = 1.0
elif line == 'CMIF video 2.0\n':
version = 2.0
elif line == 'CMIF video 3.0\n':
version = 3.0
elif line == 'CMIF video 3.1\n':
version = 3.1
else:
# XXX Could be version 0.0 without identifying header
raise Error, \
filename + ': Unrecognized file header: ' + `line`[:20]
compressheader = None
#
# Get color encoding info
# (The format may change to 'rgb' later when packfactor == 0)
#
if version <= 1.0:
format = 'grey'
c0bits, c1bits, c2bits = 8, 0, 0
chrompack = 0
offset = 0
elif version == 2.0:
line = fp.readline()
try:
c0bits, c1bits, c2bits, chrompack = eval(line[:-1])
except:
raise Error, filename + ': Bad 2.0 color info'
if c1bits or c2bits:
format = 'yiq'
else:
format = 'grey'
offset = 0
elif version in (3.0, 3.1):
line = fp.readline()
try:
format, rest = eval(line[:-1])
except:
raise Error, filename + ': Bad 3.[01] color info'
if format in ('rgb', 'jpeg'):
c0bits = c1bits = c2bits = 0
chrompack = 0
offset = 0
elif format == 'compress':
c0bits = c1bits = c2bits = 0
chrompack = 0
offset = 0
compressheader = rest
elif format in ('grey', 'jpeggrey', 'mono', 'grey2', 'grey4'):
c0bits = rest
c1bits = c2bits = 0
chrompack = 0
offset = 0
else:
# XXX ought to check that the format is valid
try:
c0bits, c1bits, c2bits, chrompack, offset = rest
except:
raise Error, filename + ': Bad 3.[01] color info'
if format == 'xrgb8':
format = 'rgb8' # rgb8 upside-down, for X
upside_down = 1
else:
upside_down = 0
#
# Get frame geometry info
#
line = fp.readline()
try:
x = eval(line[:-1])
except:
raise Error, filename + ': Bad (w,h,pf) info'
if type(x) <> type(()):
raise Error, filename + ': Bad (w,h,pf) info'
if len(x) == 3:
width, height, packfactor = x
if packfactor == 0 and version < 3.0:
format = 'rgb'
c0bits = 0
elif len(x) == 2 and version <= 1.0:
width, height = x
packfactor = 2
else:
raise Error, filename + ': Bad (w,h,pf) info'
if type(packfactor) is type(0):
if packfactor == 0: packfactor = 1
xpf = ypf = packfactor
else:
xpf, ypf = packfactor
if upside_down:
ypf = -ypf
packfactor = (xpf, ypf)
xpf = abs(xpf)
ypf = abs(ypf)
width = (width/xpf) * xpf
height = (height/ypf) * ypf
#
# Return (version, values)
#
values = (format, width, height, packfactor, \
c0bits, c1bits, c2bits, offset, chrompack, compressheader)
return (version, values)
# Read a *frame* header -- separate functions per version.
# Return (timecode, datasize, chromdatasize).
# Raise EOFError if end of data is reached.
# Raise Error if data is bad.
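# For reference, a 3.[01] frame header as written by writeframeheader()
# below is the repr of (t, datasize, chromdatasize) padded to 64 bytes,
# e.g. (illustrative values): (0.04, 19200, 0)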
def readv0frameheader(fp):
line = fp.readline()
if not line or line == '\n': raise EOFError
try:
t = eval(line[:-1])
except:
raise Error, 'Bad 0.0 frame header'
return (t, 0, 0)
def readv1frameheader(fp):
line = fp.readline()
if not line or line == '\n': raise EOFError
try:
t, datasize = eval(line[:-1])
except:
raise Error, 'Bad 1.0 frame header'
return (t, datasize, 0)
def readv2frameheader(fp):
line = fp.readline()
if not line or line == '\n': raise EOFError
try:
t, datasize = eval(line[:-1])
except:
raise Error, 'Bad 2.0 frame header'
return (t, datasize, 0)
def readv3frameheader(fp):
line = fp.readline()
if not line or line == '\n': raise EOFError
try:
t, datasize, chromdatasize = x = eval(line[:-1])
except:
raise Error, 'Bad 3.[01] frame header'
return x
# Write a CMIF video file header (always version 3.1)
def writefileheader(fp, values):
(format, width, height, packfactor, \
c0bits, c1bits, c2bits, offset, chrompack) = values
#
# Write identifying header
#
fp.write('CMIF video 3.1\n')
#
# Write color encoding info
#
if format in ('rgb', 'jpeg'):
data = (format, 0)
elif format in ('grey', 'jpeggrey', 'mono', 'grey2', 'grey4'):
data = (format, c0bits)
else:
data = (format, (c0bits, c1bits, c2bits, chrompack, offset))
fp.write(`data`+'\n')
#
# Write frame geometry info
#
data = (width, height, packfactor)
fp.write(`data`+'\n')
def writecompressfileheader(fp, cheader, values):
(format, width, height, packfactor, \
c0bits, c1bits, c2bits, offset, chrompack) = values
#
# Write identifying header
#
fp.write('CMIF video 3.1\n')
#
# Write color encoding info
#
data = (format, cheader)
fp.write(`data`+'\n')
#
# Write frame geometry info
#
data = (width, height, packfactor)
fp.write(`data`+'\n')
# Basic class for reading CMIF video files
class BasicVinFile(VideoParams):
def __init__(self, filename):
if type(filename) != type(''):
fp = filename
filename = '???'
elif filename == '-':
fp = sys.stdin
else:
fp = open(filename, 'r')
self.initfp(fp, filename)
def initfp(self, fp, filename):
VideoParams.__init__(self)
self.fp = fp
self.filename = filename
self.version, values = readfileheader(fp, filename)
self.setinfo(values)
self.freeze()
if self.version == 0.0:
w, h, pf = self.width, self.height, self.packfactor
if pf == 0:
self._datasize = w*h*4
else:
self._datasize = (w/pf) * (h/pf)
self._readframeheader = self._readv0frameheader
elif self.version == 1.0:
self._readframeheader = readv1frameheader
elif self.version == 2.0:
self._readframeheader = readv2frameheader
elif self.version in (3.0, 3.1):
self._readframeheader = readv3frameheader
else:
raise Error, \
filename + ': Bad version: ' + `self.version`
self.framecount = 0
self.atframeheader = 1
self.eofseen = 0
self.errorseen = 0
try:
self.startpos = self.fp.tell()
self.canseek = 1
except IOError:
self.startpos = -1
self.canseek = 0
def _readv0frameheader(self, fp):
t, ds, cs = readv0frameheader(fp)
ds = self._datasize
return (t, ds, cs)
def close(self):
self.fp.close()
del self.fp
del self._readframeheader
def rewind(self):
if not self.canseek:
raise Error, self.filename + ': can\'t seek'
self.fp.seek(self.startpos)
self.framecount = 0
self.atframeheader = 1
self.eofseen = 0
self.errorseen = 0
def warmcache(self):
print '[BasicVinFile.warmcache() not implemented]'
def printinfo(self):
print 'File: ', self.filename
print 'Size: ', getfilesize(self.filename)
print 'Version: ', self.version
VideoParams.printinfo(self)
def getnextframe(self):
t, ds, cs = self.getnextframeheader()
data, cdata = self.getnextframedata(ds, cs)
return (t, data, cdata)
def skipnextframe(self):
t, ds, cs = self.getnextframeheader()
self.skipnextframedata(ds, cs)
return t
def getnextframeheader(self):
if self.eofseen: raise EOFError
if self.errorseen: raise CallError
if not self.atframeheader: raise CallError
self.atframeheader = 0
try:
return self._readframeheader(self.fp)
except Error, msg:
self.errorseen = 1
# Patch up the error message
raise Error, self.filename + ': ' + msg
except EOFError:
self.eofseen = 1
raise EOFError
def getnextframedata(self, ds, cs):
if self.eofseen: raise EOFError
if self.errorseen: raise CallError
if self.atframeheader: raise CallError
if ds:
data = self.fp.read(ds)
if len(data) < ds:
self.eofseen = 1
raise EOFError
else:
data = ''
if cs:
cdata = self.fp.read(cs)
if len(cdata) < cs:
self.eofseen = 1
raise EOFError
else:
cdata = ''
self.atframeheader = 1
self.framecount = self.framecount + 1
return (data, cdata)
def skipnextframedata(self, ds, cs):
if self.eofseen: raise EOFError
if self.errorseen: raise CallError
if self.atframeheader: raise CallError
# Note that this won't raise EOFError for a partial frame
# since there is no easy way to tell whether a seek
# ended up beyond the end of the file
if self.canseek:
self.fp.seek(ds + cs, 1) # Relative seek
else:
dummy = self.fp.read(ds + cs)
del dummy
self.atframeheader = 1
self.framecount = self.framecount + 1
# Subroutine to return a file's size in bytes
def getfilesize(filename):
import os, stat
try:
st = os.stat(filename)
return st[stat.ST_SIZE]
except os.error:
return 0
# Derived class implementing random access and index cached in the file
class RandomVinFile(BasicVinFile):
def initfp(self, fp, filename):
BasicVinFile.initfp(self, fp, filename)
self.index = []
def warmcache(self):
if len(self.index) == 0:
try:
self.readcache()
except Error:
self.buildcache()
else:
print '[RandomVinFile.warmcache(): too late]'
self.rewind()
def buildcache(self):
self.index = []
self.rewind()
while 1:
try: dummy = self.skipnextframe()
except EOFError: break
self.rewind()
def writecache(self):
		# Raises IOError if the file is not seekable & writable!
import marshal
if len(self.index) == 0:
self.buildcache()
if len(self.index) == 0:
raise Error, self.filename + ': No frames'
self.fp.seek(0, 2)
self.fp.write('\n/////CMIF/////\n')
pos = self.fp.tell()
data = `pos`
data = '\n-*-*-CMIF-*-*-\n' + data + ' '*(15-len(data)) + '\n'
try:
marshal.dump(self.index, self.fp)
self.fp.write(data)
self.fp.flush()
finally:
self.rewind()
def readcache(self):
# Raises Error if there is no cache in the file
import marshal
if len(self.index) <> 0:
raise CallError
self.fp.seek(-32, 2)
data = self.fp.read()
if data[:16] <> '\n-*-*-CMIF-*-*-\n' or data[-1:] <> '\n':
self.rewind()
raise Error, self.filename + ': No cache'
pos = eval(data[16:-1])
self.fp.seek(pos)
try:
self.index = marshal.load(self.fp)
except TypeError:
self.rewind()
raise Error, self.filename + ': Bad cache'
self.rewind()
def getnextframeheader(self):
if self.framecount < len(self.index):
return self._getindexframeheader(self.framecount)
if self.framecount > len(self.index):
raise AssertError, \
'managed to bypass index?!?'
rv = BasicVinFile.getnextframeheader(self)
if self.canseek:
pos = self.fp.tell()
self.index.append((rv, pos))
return rv
def getrandomframe(self, i):
t, ds, cs = self.getrandomframeheader(i)
data, cdata = self.getnextframedata(ds, cs)
return t, data, cdata
def getrandomframeheader(self, i):
if i < 0: raise ValueError, 'negative frame index'
if not self.canseek:
raise Error, self.filename + ': can\'t seek'
if i < len(self.index):
return self._getindexframeheader(i)
if len(self.index) > 0:
rv = self.getrandomframeheader(len(self.index)-1)
else:
self.rewind()
rv = self.getnextframeheader()
while i > self.framecount:
			t, ds, cs = rv
			self.skipnextframedata(ds, cs)
rv = self.getnextframeheader()
return rv
def _getindexframeheader(self, i):
(rv, pos) = self.index[i]
self.fp.seek(pos)
self.framecount = i
self.atframeheader = 0
self.eofseen = 0
self.errorseen = 0
return rv
# Basic class for writing CMIF video files
class BasicVoutFile(VideoParams):
def __init__(self, filename):
if type(filename) != type(''):
fp = filename
filename = '???'
elif filename == '-':
fp = sys.stdout
else:
fp = open(filename, 'w')
self.initfp(fp, filename)
def initfp(self, fp, filename):
VideoParams.__init__(self)
self.fp = fp
self.filename = filename
		self.version = 3.1 # In case anyone inquires
def flush(self):
self.fp.flush()
def close(self):
self.fp.close()
del self.fp
def prealloc(self, nframes):
if not self.frozen: raise CallError
data = '\xff' * (self.calcframesize() + 64)
pos = self.fp.tell()
for i in range(nframes):
self.fp.write(data)
self.fp.seek(pos)
def writeheader(self):
if self.frozen: raise CallError
if self.format == 'compress':
writecompressfileheader(self.fp, self.compressheader, \
self.getinfo())
else:
writefileheader(self.fp, self.getinfo())
self.freeze()
self.atheader = 1
self.framecount = 0
def rewind(self):
self.fp.seek(0)
self.unfreeze()
self.atheader = 1
self.framecount = 0
def printinfo(self):
print 'File: ', self.filename
VideoParams.printinfo(self)
def writeframe(self, t, data, cdata):
if data: ds = len(data)
else: ds = 0
if cdata: cs = len(cdata)
else: cs = 0
self.writeframeheader(t, ds, cs)
self.writeframedata(data, cdata)
def writeframeheader(self, t, ds, cs):
if not self.frozen: self.writeheader()
if not self.atheader: raise CallError
data = `(t, ds, cs)`
n = len(data)
if n < 63: data = data + ' '*(63-n)
self.fp.write(data + '\n')
self.atheader = 0
def writeframedata(self, data, cdata):
if not self.frozen or self.atheader: raise CallError
if data: self.fp.write(data)
if cdata: self.fp.write(cdata)
self.atheader = 1
self.framecount = self.framecount + 1
# Classes that combine files with displayers:
class VinFile(RandomVinFile, Displayer):
def initfp(self, fp, filename):
Displayer.__init__(self)
RandomVinFile.initfp(self, fp, filename)
def shownextframe(self):
t, data, cdata = self.getnextframe()
self.showframe(data, cdata)
return t
class VoutFile(BasicVoutFile, Displayer):
def initfp(self, fp, filename):
Displayer.__init__(self)
## Grabber.__init__(self) # XXX not needed
BasicVoutFile.initfp(self, fp, filename)
# Simple test program (VinFile only)
def test():
import time
if sys.argv[1:]: filename = sys.argv[1]
else: filename = 'film.video'
vin = VinFile(filename)
vin.printinfo()
gl.foreground()
gl.prefsize(vin.getsize())
wid = gl.winopen(filename)
vin.initcolormap()
t0 = time.time()
while 1:
try: t, data, cdata = vin.getnextframe()
except EOFError: break
dt = t0 + t - time.time()
		if dt > 0: time.sleep(dt)
vin.showframe(data, cdata)
time.sleep(2)
| gpl-2.0 |
TangXT/edx-platform | common/lib/xmodule/xmodule/tests/test_xblock_wrappers.py | 9 | 14097 | """
Tests for the wrapping layer that provides the XBlock API using XModule/Descriptor
functionality
"""
# For tests, ignore access to protected members
# pylint: disable=protected-access
import webob
import ddt
from factory import (
BUILD_STRATEGY,
Factory,
lazy_attribute,
LazyAttributeSequence,
post_generation,
SubFactory,
use_strategy,
)
from fs.memoryfs import MemoryFS
from lxml import etree
from mock import Mock
from unittest.case import SkipTest, TestCase
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from opaque_keys.edx.locations import Location
from xmodule.x_module import ModuleSystem, XModule, XModuleDescriptor, DescriptorSystem, STUDENT_VIEW, STUDIO_VIEW
from xmodule.annotatable_module import AnnotatableDescriptor
from xmodule.capa_module import CapaDescriptor
from xmodule.course_module import CourseDescriptor
from xmodule.combined_open_ended_module import CombinedOpenEndedDescriptor
from xmodule.discussion_module import DiscussionDescriptor
from xmodule.gst_module import GraphicalSliderToolDescriptor
from xmodule.html_module import HtmlDescriptor
from xmodule.peer_grading_module import PeerGradingDescriptor
from xmodule.poll_module import PollDescriptor
from xmodule.word_cloud_module import WordCloudDescriptor
from xmodule.crowdsource_hinter import CrowdsourceHinterDescriptor
#from xmodule.video_module import VideoDescriptor
from xmodule.seq_module import SequenceDescriptor
from xmodule.conditional_module import ConditionalDescriptor
from xmodule.randomize_module import RandomizeDescriptor
from xmodule.vertical_module import VerticalDescriptor
from xmodule.wrapper_module import WrapperDescriptor
from xmodule.tests import get_test_descriptor_system, get_test_system
# A dictionary that maps specific XModuleDescriptor classes without children
# to a list of sample field values to test with.
# TODO: Add more types of sample data
LEAF_XMODULES = {
AnnotatableDescriptor: [{}],
CapaDescriptor: [{}],
CombinedOpenEndedDescriptor: [{}],
DiscussionDescriptor: [{}],
GraphicalSliderToolDescriptor: [{}],
HtmlDescriptor: [{}],
PeerGradingDescriptor: [{}],
PollDescriptor: [{'display_name': 'Poll Display Name'}],
WordCloudDescriptor: [{}],
# This is being excluded because it has dependencies on django
#VideoDescriptor,
}
# A dictionary that maps specific XModuleDescriptor classes with children
# to a list of sample field values to test with.
# TODO: Add more types of sample data
CONTAINER_XMODULES = {
ConditionalDescriptor: [{}],
CourseDescriptor: [{}],
CrowdsourceHinterDescriptor: [{}],
RandomizeDescriptor: [{}],
SequenceDescriptor: [{}],
VerticalDescriptor: [{}],
WrapperDescriptor: [{}],
}
# These modules are not editable in studio yet
NOT_STUDIO_EDITABLE = (
CrowdsourceHinterDescriptor,
GraphicalSliderToolDescriptor,
PollDescriptor
)
def flatten(class_dict):
"""
    Flatten a dict from cls -> [fields, ...], yielding values of the form
    (cls, fields) for each fields dict in the list.
"""
for cls, fields_list in class_dict.items():
for fields in fields_list:
yield (cls, fields)
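# For example, flatten({HtmlDescriptor: [{}, {'data': 'x'}]}) would yield
# (HtmlDescriptor, {}) and (HtmlDescriptor, {'data': 'x'}) -- illustrative
# fields; any of the descriptor classes above works the same way.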
@use_strategy(BUILD_STRATEGY)
class ModuleSystemFactory(Factory):
"""
Factory to build a test ModuleSystem. Creation is
performed by :func:`xmodule.tests.get_test_system`, so
arguments for that function are valid factory attributes.
"""
FACTORY_FOR = ModuleSystem
@classmethod
def _build(cls, target_class, *args, **kwargs): # pylint: disable=unused-argument
"""See documentation from :meth:`factory.Factory._build`"""
return get_test_system(*args, **kwargs)
@use_strategy(BUILD_STRATEGY)
class DescriptorSystemFactory(Factory):
"""
Factory to build a test DescriptorSystem. Creation is
performed by :func:`xmodule.tests.get_test_descriptor_system`, so
arguments for that function are valid factory attributes.
"""
FACTORY_FOR = DescriptorSystem
@classmethod
def _build(cls, target_class, *args, **kwargs): # pylint: disable=unused-argument
"""See documentation from :meth:`factory.Factory._build`"""
return get_test_descriptor_system(*args, **kwargs)
class ContainerModuleRuntimeFactory(ModuleSystemFactory):
"""
Factory to generate a ModuleRuntime that generates children when asked
for them, for testing container XModules.
"""
@post_generation
def depth(self, create, depth, **kwargs): # pylint: disable=unused-argument
"""
When `depth` is specified as a Factory parameter, creates a
tree of children with that many levels.
"""
# pylint: disable=no-member
if depth == 0:
self.get_module.side_effect = lambda x: LeafModuleFactory(descriptor_cls=HtmlDescriptor)
else:
self.get_module.side_effect = lambda x: ContainerModuleFactory(descriptor_cls=VerticalDescriptor, depth=depth - 1)
@post_generation
def position(self, create, position=2, **kwargs): # pylint: disable=unused-argument, method-hidden
"""
Update the position attribute of the generated ModuleRuntime.
"""
self.position = position
class ContainerDescriptorRuntimeFactory(DescriptorSystemFactory):
"""
Factory to generate a DescriptorRuntime that generates children when asked
for them, for testing container XModuleDescriptors.
"""
@post_generation
def depth(self, create, depth, **kwargs): # pylint: disable=unused-argument
"""
When `depth` is specified as a Factory parameter, creates a
tree of children with that many levels.
"""
# pylint: disable=no-member
if depth == 0:
self.load_item.side_effect = lambda x: LeafModuleFactory(descriptor_cls=HtmlDescriptor)
else:
self.load_item.side_effect = lambda x: ContainerModuleFactory(descriptor_cls=VerticalDescriptor, depth=depth - 1)
@post_generation
def position(self, create, position=2, **kwargs): # pylint: disable=unused-argument, method-hidden
"""
Update the position attribute of the generated ModuleRuntime.
"""
self.position = position
@use_strategy(BUILD_STRATEGY)
class LeafDescriptorFactory(Factory):
"""
Factory to generate leaf XModuleDescriptors.
"""
# pylint: disable=missing-docstring
FACTORY_FOR = XModuleDescriptor
runtime = SubFactory(DescriptorSystemFactory)
url_name = LazyAttributeSequence('{.block_type}_{}'.format)
@lazy_attribute
def location(self):
return Location('org', 'course', 'run', 'category', self.url_name, None)
@lazy_attribute
def block_type(self):
return self.descriptor_cls.__name__ # pylint: disable=no-member
@lazy_attribute
def definition_id(self):
return self.location
@lazy_attribute
def usage_id(self):
return self.location
@classmethod
def _build(cls, target_class, *args, **kwargs): # pylint: disable=unused-argument
runtime = kwargs.pop('runtime')
desc_cls = kwargs.pop('descriptor_cls')
block_type = kwargs.pop('block_type')
def_id = kwargs.pop('definition_id')
usage_id = kwargs.pop('usage_id')
block = runtime.construct_xblock_from_class(
desc_cls,
ScopeIds(None, block_type, def_id, usage_id),
DictFieldData(dict(**kwargs))
)
block.save()
return block
class LeafModuleFactory(LeafDescriptorFactory):
"""
Factory to generate leaf XModuleDescriptors that are prepped to be
used as XModules.
"""
@post_generation
def xmodule_runtime(self, create, xmodule_runtime, **kwargs): # pylint: disable=method-hidden, unused-argument
"""
Set the xmodule_runtime to make this XModuleDescriptor usable
as an XModule.
"""
if xmodule_runtime is None:
xmodule_runtime = ModuleSystemFactory()
self.xmodule_runtime = xmodule_runtime
class ContainerDescriptorFactory(LeafDescriptorFactory):
"""
Factory to generate XModuleDescriptors that are containers.
"""
runtime = SubFactory(ContainerDescriptorRuntimeFactory)
children = range(3)
class ContainerModuleFactory(LeafModuleFactory):
"""
Factory to generate XModuleDescriptors that are containers
and are ready to act as XModules.
"""
@lazy_attribute
def xmodule_runtime(self):
return ContainerModuleRuntimeFactory(depth=self.depth) # pylint: disable=no-member
@ddt.ddt
class XBlockWrapperTestMixin(object):
"""
    This is a mixin for building tests of the implementation of the XBlock
    API by wrapping XModule native functions.
    You can create an actual test case by inheriting from this class and
    TestCase, and implementing skip_if_invalid and check_property.
"""
def skip_if_invalid(self, descriptor_cls):
"""
Raise SkipTest if this descriptor_cls shouldn't be tested.
"""
pass
def check_property(self, descriptor): # pylint: disable=unused-argument
"""
Execute assertions to verify that the property under test is true for
the supplied descriptor.
"""
raise SkipTest("check_property not defined")
# Test that for all of the leaf XModule Descriptors,
# the test property holds
@ddt.data(*flatten(LEAF_XMODULES))
def test_leaf_node(self, cls_and_fields):
descriptor_cls, fields = cls_and_fields
self.skip_if_invalid(descriptor_cls)
descriptor = LeafModuleFactory(descriptor_cls=descriptor_cls, **fields)
self.check_property(descriptor)
# Test that when an xmodule is generated from descriptor_cls
# with only xmodule children, the test property holds
@ddt.data(*flatten(CONTAINER_XMODULES))
def test_container_node_xmodules_only(self, cls_and_fields):
descriptor_cls, fields = cls_and_fields
self.skip_if_invalid(descriptor_cls)
descriptor = ContainerModuleFactory(descriptor_cls=descriptor_cls, depth=2, **fields)
self.check_property(descriptor)
# Test that when an xmodule is generated from descriptor_cls
# with mixed xmodule and xblock children, the test property holds
@ddt.data(*flatten(CONTAINER_XMODULES))
def test_container_node_mixed(self, cls_and_fields): # pylint: disable=unused-argument
raise SkipTest("XBlock support in XDescriptor not yet fully implemented")
# Test that when an xmodule is generated from descriptor_cls
# with only xblock children, the test property holds
@ddt.data(*flatten(CONTAINER_XMODULES))
def test_container_node_xblocks_only(self, cls_and_fields): # pylint: disable=unused-argument
raise SkipTest("XBlock support in XModules not yet fully implemented")
class TestStudentView(XBlockWrapperTestMixin, TestCase):
"""
This tests that student_view and XModule.get_html produce the same results.
"""
def skip_if_invalid(self, descriptor_cls):
if descriptor_cls.module_class.student_view != XModule.student_view:
raise SkipTest(descriptor_cls.__name__ + " implements student_view")
def check_property(self, descriptor):
"""
Assert that both student_view and get_html render the same.
"""
self.assertEqual(
descriptor._xmodule.get_html(),
descriptor.render(STUDENT_VIEW).content
)
class TestStudioView(XBlockWrapperTestMixin, TestCase):
"""
This tests that studio_view and XModuleDescriptor.get_html produce the same results
"""
def skip_if_invalid(self, descriptor_cls):
if descriptor_cls in NOT_STUDIO_EDITABLE:
raise SkipTest(descriptor_cls.__name__ + " is not editable in studio")
if descriptor_cls.studio_view != XModuleDescriptor.studio_view:
raise SkipTest(descriptor_cls.__name__ + " implements studio_view")
def check_property(self, descriptor):
"""
Assert that studio_view and get_html render the same.
"""
self.assertEqual(descriptor.get_html(), descriptor.render(STUDIO_VIEW).content)
class TestXModuleHandler(TestCase):
"""
Tests that the xmodule_handler function correctly wraps handle_ajax
"""
def setUp(self):
self.module = XModule(descriptor=Mock(), field_data=Mock(), runtime=Mock(), scope_ids=Mock())
self.module.handle_ajax = Mock(return_value='{}')
self.request = webob.Request({})
def test_xmodule_handler_passed_data(self):
self.module.xmodule_handler(self.request)
self.module.handle_ajax.assert_called_with(None, self.request.POST)
def test_xmodule_handler_dispatch(self):
self.module.xmodule_handler(self.request, 'dispatch')
self.module.handle_ajax.assert_called_with('dispatch', self.request.POST)
def test_xmodule_handler_return_value(self):
response = self.module.xmodule_handler(self.request)
self.assertIsInstance(response, webob.Response)
self.assertEqual(response.body, '{}')
class TestXmlExport(XBlockWrapperTestMixin, TestCase):
"""
This tests that XModuleDescriptor.export_to_xml and add_xml_to_node produce the same results.
"""
def skip_if_invalid(self, descriptor_cls):
if descriptor_cls.add_xml_to_node != XModuleDescriptor.add_xml_to_node:
raise SkipTest(descriptor_cls.__name__ + " implements add_xml_to_node")
def check_property(self, descriptor):
xmodule_api_fs = MemoryFS()
xblock_api_fs = MemoryFS()
descriptor.runtime.export_fs = xblock_api_fs
xblock_node = etree.Element('unknown')
descriptor.add_xml_to_node(xblock_node)
xmodule_node = etree.fromstring(descriptor.export_to_xml(xmodule_api_fs))
self.assertEquals(list(xmodule_api_fs.walk()), list(xblock_api_fs.walk()))
self.assertEquals(etree.tostring(xmodule_node), etree.tostring(xblock_node))
| agpl-3.0 |
flaviostutz/openalpr | src/bindings/python/openalpr/openalpr.py | 9 | 3694 | import ctypes
import json
import platform
class Alpr():
def __init__(self, country, config_file, runtime_dir):
# Load the .dll for Windows and the .so for Unix-based
if platform.system().lower().find("windows") != -1:
self._openalprpy_lib = ctypes.cdll.LoadLibrary("openalprpy.dll")
elif platform.system().lower().find("darwin") != -1:
self._openalprpy_lib = ctypes.cdll.LoadLibrary("libopenalprpy.dylib")
else:
self._openalprpy_lib = ctypes.cdll.LoadLibrary("libopenalprpy.so")
self._initialize_func = self._openalprpy_lib.initialize
self._initialize_func.restype = ctypes.c_void_p
self._initialize_func.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p]
self._dispose_func = self._openalprpy_lib.dispose
self._dispose_func.argtypes = [ctypes.c_void_p]
self._is_loaded_func = self._openalprpy_lib.isLoaded
self._is_loaded_func.argtypes = [ctypes.c_void_p]
self._is_loaded_func.restype = ctypes.c_bool
self._recognize_file_func = self._openalprpy_lib.recognizeFile
self._recognize_file_func.restype = ctypes.c_void_p
self._recognize_file_func.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
self._recognize_array_func = self._openalprpy_lib.recognizeArray
self._recognize_array_func.restype = ctypes.c_void_p
self._recognize_array_func.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_ubyte), ctypes.c_uint]
self._free_json_mem_func = self._openalprpy_lib.freeJsonMem
self._set_default_region_func = self._openalprpy_lib.setDefaultRegion
self._set_default_region_func.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
self._set_detect_region_func = self._openalprpy_lib.setDetectRegion
self._set_detect_region_func.argtypes = [ctypes.c_void_p, ctypes.c_bool]
self._set_top_n_func = self._openalprpy_lib.setTopN
self._set_top_n_func.argtypes = [ctypes.c_void_p, ctypes.c_int]
self._get_version_func = self._openalprpy_lib.getVersion
self._get_version_func.argtypes = [ctypes.c_void_p]
self._get_version_func.restype = ctypes.c_void_p
self.alpr_pointer = self._initialize_func(country, config_file, runtime_dir)
def unload(self):
self._openalprpy_lib.dispose(self.alpr_pointer)
def is_loaded(self):
return self._is_loaded_func(self.alpr_pointer)
def recognize_file(self, file_path):
ptr = self._recognize_file_func(self.alpr_pointer, file_path)
json_data = ctypes.cast(ptr, ctypes.c_char_p).value
response_obj = json.loads(json_data)
self._free_json_mem_func(ctypes.c_void_p(ptr))
return response_obj
def recognize_array(self, byte_array):
pb = ctypes.cast(byte_array, ctypes.POINTER(ctypes.c_ubyte))
ptr = self._recognize_array_func(self.alpr_pointer, pb, len(byte_array))
json_data = ctypes.cast(ptr, ctypes.c_char_p).value
response_obj = json.loads(json_data)
self._free_json_mem_func(ctypes.c_void_p(ptr))
return response_obj
def get_version(self):
ptr = self._get_version_func(self.alpr_pointer)
version_number = ctypes.cast(ptr, ctypes.c_char_p).value
self._free_json_mem_func(ctypes.c_void_p(ptr))
return version_number
def set_top_n(self, topn):
self._set_top_n_func(self.alpr_pointer, topn)
def set_default_region(self, region):
self._set_default_region_func(self.alpr_pointer, region)
def set_detect_region(self, enabled):
self._set_detect_region_func(self.alpr_pointer, enabled)
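# A minimal usage sketch (paths, country code and result keys are
# illustrative assumptions about openalpr's JSON output):
#
#   alpr = Alpr("us", "/etc/openalpr/openalpr.conf",
#               "/usr/share/openalpr/runtime_data")
#   if alpr.is_loaded():
#       alpr.set_top_n(5)
#       results = alpr.recognize_file("/tmp/plate.jpg")
#       for plate in results["results"]:
#           print(plate["plate"], plate["confidence"])
#   alpr.unload()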
| agpl-3.0 |
jasonmccampbell/numpy-refactor-sprint | numpy/distutils/unixccompiler.py | 75 | 3651 | """
unixccompiler - can handle very long argument lists for ar.
"""
import os
from distutils.errors import DistutilsExecError, CompileError
from distutils.unixccompiler import *
from numpy.distutils.ccompiler import replace_method
from numpy.distutils.compat import get_exception
if sys.version_info[0] < 3:
import log
else:
from numpy.distutils import log
# Note that UnixCCompiler._compile appeared in Python 2.3
def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile a single source files with a Unix-style compiler."""
display = '%s: %s' % (os.path.basename(self.compiler_so[0]),src)
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs, display = display)
except DistutilsExecError:
msg = str(get_exception())
raise CompileError(msg)
replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile)
def UnixCCompiler_create_static_lib(self, objects, output_libname,
output_dir=None, debug=0, target_lang=None):
"""
Build a static library in a separate sub-process.
Parameters
----------
objects : list or tuple of str
List of paths to object files used to build the static library.
output_libname : str
The library name as an absolute or relative (if `output_dir` is used)
path.
output_dir : str, optional
The path to the output directory. Default is None, in which case
        the ``output_dir`` attribute of the UnixCCompiler instance is used.
debug : bool, optional
This parameter is not used.
target_lang : str, optional
This parameter is not used.
Returns
-------
None
"""
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = \
self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
try:
# previous .a may be screwed up; best to remove it first
# and recreate.
# Also, ar on OS X doesn't handle updating universal archives
os.unlink(output_filename)
except (IOError, OSError):
pass
self.mkpath(os.path.dirname(output_filename))
tmp_objects = objects + self.objects
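        # Feed ar at most 50 object files per invocation so the command
        # line stays short -- this chunking is what lets this module
        # "handle very long argument lists for ar" (see module docstring).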
while tmp_objects:
objects = tmp_objects[:50]
tmp_objects = tmp_objects[50:]
display = '%s: adding %d object files to %s' % (
os.path.basename(self.archiver[0]),
len(objects), output_filename)
self.spawn(self.archiver + [output_filename] + objects,
display = display)
        # Not many Unices require ranlib anymore -- SunOS 4.x is, I
        # think, the only major Unix that does. Maybe we need some
# platform intelligence here to skip ranlib if it's not
# needed -- or maybe Python's configure script took care of
# it for us, hence the check for leading colon.
if self.ranlib:
display = '%s:@ %s' % (os.path.basename(self.ranlib[0]),
output_filename)
try:
self.spawn(self.ranlib + [output_filename],
display = display)
except DistutilsExecError:
msg = str(get_exception())
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
return
replace_method(UnixCCompiler, 'create_static_lib',
UnixCCompiler_create_static_lib)
| bsd-3-clause |
butterflypay/bitcoin | qa/rpc-tests/p2p-acceptblock.py | 49 | 12364 | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
   getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.create_callback_map()
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "bitcoind"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = time.time() + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print "Unrequested block too far-ahead not processed"
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# If the wait here were too short, the test could falsely succeed (if the
# node hasn't processed the block by the time we check, and then processes
# it afterwards and incorrectly advances the tip). The sync_with_ping below
# handles that, and any miss would also be caught later on, when we verify
# that an inv triggers a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
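# Illustrative helper (added for this document, not part of the original
# test): the inv/getdata "poke" from step 6 above, factored into a reusable
# function. Assumes a mininode peer exposing last_getdata, send_message()
# and sync_with_ping(), as the test framework used here does.
def poke_for_getdata(peer, announce_sha256, expected_sha256):
    with mininode_lock:
        peer.last_getdata = None  # clear state so the next getdata is ours
    peer.send_message(msg_inv([CInv(2, announce_sha256)]))
    peer.sync_with_ping()
    with mininode_lock:
        getdata = peer.last_getdata
    assert_equal(getdata.inv[0].hash, expected_sha256)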
if __name__ == '__main__':
AcceptBlockTest().main()
| mit |
wlerin/streamlink | src/streamlink/plugins/pluzz.py | 3 | 8697 | import logging
import re
import sys
import time
from streamlink.plugin import Plugin, PluginArguments, PluginArgument
from streamlink.plugin.api import validate
from streamlink.stream import DASHStream, HDSStream, HLSStream, HTTPStream
from streamlink.stream.ffmpegmux import MuxedStream
log = logging.getLogger(__name__)
class Pluzz(Plugin):
GEO_URL = 'http://geo.francetv.fr/ws/edgescape.json'
API_URL = 'http://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion={0}'
TOKEN_URL = 'http://hdfauthftv-a.akamaihd.net/esi/TA?url={0}'
SWF_PLAYER_URL = 'https://staticftv-a.akamaihd.net/player/bower_components/player_flash/dist/FranceTVNVPVFlashPlayer.akamai-7301b6035a43c4e29b7935c9c36771d2.swf'
_url_re = re.compile(r'''
https?://(
(?:www\.)france\.tv/.+\.html |
www\.(ludo|zouzous)\.fr/heros/[\w-]+ |
(.+\.)?francetvinfo\.fr)
''', re.VERBOSE)
_pluzz_video_id_re = re.compile(r'''(?P<q>["']*)videoId(?P=q):\s*["'](?P<video_id>[^"']+)["']''')
_jeunesse_video_id_re = re.compile(r'playlist: \[{.*?,"identity":"(?P<video_id>.+?)@(?P<catalogue>Ludo|Zouzous)"')
_sport_video_id_re = re.compile(r'data-video="(?P<video_id>.+?)"')
_embed_video_id_re = re.compile(r'href="http://videos\.francetv\.fr/video/(?P<video_id>.+?)(?:@.+?)?"')
_hds_pv_data_re = re.compile(r"~data=.+?!")
_mp4_bitrate_re = re.compile(r'.*-(?P<bitrate>[0-9]+k)\.mp4')
_geo_schema = validate.Schema({
'reponse': {
'geo_info': {
'country_code': validate.text
}
}
})
_api_schema = validate.Schema({
'videos': validate.all(
[{
'format': validate.any(
None,
validate.text
),
'url': validate.any(
None,
validate.url(),
),
'statut': validate.text,
'drm': bool,
'geoblocage': validate.any(
None,
[validate.all(validate.text)]
),
'plages_ouverture': validate.all(
[{
'debut': validate.any(
None,
int
),
'fin': validate.any(
None,
int
)
}]
)
}]
),
'subtitles': validate.any(
[],
validate.all(
[{
'type': validate.text,
'url': validate.url(),
'format': validate.text
}]
)
)
})
_player_schema = validate.Schema({'result': validate.url()})
arguments = PluginArguments(
PluginArgument(
"mux-subtitles",
action="store_true",
help="""
Automatically mux available subtitles in to the output stream.
"""
)
)
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
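    # Illustrative matches (sketch added for this document; the sample URLs
    # are invented and only exercise the pattern shapes above):
    #   Pluzz.can_handle_url('https://www.france.tv/some-show.html')  # True
    #   Pluzz.can_handle_url('http://www.ludo.fr/heros/oggy')         # True
    #   Pluzz.can_handle_url('https://example.com/video')             # False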
def _get_streams(self):
# Retrieve geolocation data
res = self.session.http.get(self.GEO_URL)
geo = self.session.http.json(res, schema=self._geo_schema)
country_code = geo['reponse']['geo_info']['country_code']
log.debug('Country: {0}'.format(country_code))
# Retrieve URL page and search for video ID
res = self.session.http.get(self.url)
if 'france.tv' in self.url:
match = self._pluzz_video_id_re.search(res.text)
elif 'ludo.fr' in self.url or 'zouzous.fr' in self.url:
match = self._jeunesse_video_id_re.search(res.text)
elif 'sport.francetvinfo.fr' in self.url:
match = self._sport_video_id_re.search(res.text)
else:
match = self._embed_video_id_re.search(res.text)
if match is None:
return
video_id = match.group('video_id')
log.debug('Video ID: {0}'.format(video_id))
res = self.session.http.get(self.API_URL.format(video_id))
videos = self.session.http.json(res, schema=self._api_schema)
now = time.time()
offline = False
geolocked = False
drm = False
expired = False
streams = []
for video in videos['videos']:
log.trace('{0!r}'.format(video))
video_url = video['url']
# Check whether video format is available
if video['statut'] != 'ONLINE':
offline = True
continue
# Check whether video format is geo-locked
if video['geoblocage'] is not None and country_code not in video['geoblocage']:
geolocked = True
continue
# Check whether video is DRM-protected
if video['drm']:
drm = True
continue
# Check whether video format is expired
available = False
for interval in video['plages_ouverture']:
available = (interval['debut'] or 0) <= now <= (interval['fin'] or sys.maxsize)
if available:
break
if not available:
expired = True
continue
res = self.session.http.get(self.TOKEN_URL.format(video_url))
video_url = res.text
if '.mpd' in video_url:
# Get redirect video URL
res = self.session.http.get(res.text)
video_url = res.url
for bitrate, stream in DASHStream.parse_manifest(self.session,
video_url).items():
streams.append((bitrate, stream))
elif '.f4m' in video_url:
for bitrate, stream in HDSStream.parse_manifest(self.session,
video_url,
is_akamai=True,
pvswf=self.SWF_PLAYER_URL).items():
# HDS videos with data in their manifest fragment token don't seem
# to be supported by HDSStream. Ignore such streams (but HDS streams
# having only the hdntl parameter in their manifest token will be
# provided)
pvtoken = stream.request_params['params'].get('pvtoken', '')
match = self._hds_pv_data_re.search(pvtoken)
if match is None:
streams.append((bitrate, stream))
elif '.m3u8' in video_url:
for stream in HLSStream.parse_variant_playlist(self.session, video_url).items():
streams.append(stream)
# HBB TV streams are not provided anymore by France Televisions
elif '.mp4' in video_url and '/hbbtv/' not in video_url:
match = self._mp4_bitrate_re.match(video_url)
if match is not None:
bitrate = match.group('bitrate')
else:
# Fallback bitrate (all France Televisions MP4 videos seem to
# have this bitrate)
bitrate = '1500k'
streams.append((bitrate, HTTPStream(self.session, video_url)))
if self.get_option("mux_subtitles") and videos['subtitles'] != []:
substreams = {}
for subtitle in videos['subtitles']:
# TTML subtitles are available but not supported by FFmpeg
if subtitle['format'] == 'ttml':
continue
substreams[subtitle['type']] = HTTPStream(self.session, subtitle['url'])
for quality, stream in streams:
yield quality, MuxedStream(self.session, stream, subtitles=substreams)
else:
for stream in streams:
yield stream
if offline:
log.error('Failed to access stream, may be due to offline content')
if geolocked:
log.error('Failed to access stream, may be due to geo-restricted content')
if drm:
log.error('Failed to access stream, may be due to DRM-protected content')
if expired:
log.error('Failed to access stream, may be due to expired content')
__plugin__ = Pluzz
| bsd-2-clause |
acenario/Payable | lib/python2.7/site-packages/django/http/multipartparser.py | 105 | 24204 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousMultipartForm
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
"""
An RFC 2388 multipart/form-data parser.
``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
# Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, six.text_type):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid.
# Handle content-length == 0 before continuing.
if self._content_length == 0:
return QueryDict('', encoding=self._encoding), MultiValueDict()
# See if any of the handlers take care of the parsing.
# This allows overriding everything if need be.
for handler in handlers:
result = handler.handle_raw_input(self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
# Check to see if it was handled
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_text(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = base64.b64decode(raw_data)
except _BASE64_DECODE_ERROR:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_text(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_text(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type, content_type_extra = meta_data.get('content-type', ('', {}))
content_type = content_type.strip()
charset = content_type_extra.get('charset')
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset, content_type_extra)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
# We should always decode base64 chunks in multiples of 4,
# ignoring whitespace.
stripped_chunk = b"".join(chunk.split())
remaining = len(stripped_chunk) % 4
while remaining != 0:
over_chunk = field_stream.read(4 - remaining)
stripped_chunk += b"".join(over_chunk.split())
remaining = len(stripped_chunk) % 4
try:
chunk = base64.b64decode(stripped_chunk)
except Exception as e:
# Since this is only a chunk, any error is an unfixable error.
msg = "Could not decode base64 data: %r" % e
six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile:
self._close_files()
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
self._close_files()
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signaling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(
force_text(old_field_name, self._encoding, errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\") + 1:].strip()
def _close_files(self):
# Free up all file handles.
# FIXME: this currently assumes that upload handlers store the file as 'file'
# We should document that... (Maybe add handler.free_file to complement new_file)
for handler in self._upload_handlers:
if hasattr(handler, 'file'):
handler.file.close()
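# Illustrative sketch (added for this document, not part of Django): how a
# caller typically drives MultiPartParser. Assumes Django settings are
# configured; the boundary and field name are invented sample data.
def _example_multipart_parse():
    from io import BytesIO
    from django.core.files.uploadhandler import MemoryFileUploadHandler
    body = (b'--boundary\r\n'
            b'Content-Disposition: form-data; name="title"\r\n'
            b'\r\n'
            b'hello\r\n'
            b'--boundary--\r\n')
    meta = {
        'CONTENT_TYPE': 'multipart/form-data; boundary=boundary',
        'CONTENT_LENGTH': str(len(body)),
    }
    parser = MultiPartParser(meta, BytesIO(body), [MemoryFileUploadHandler()])
    post, files = parser.parse()
    assert post['title'] == 'hello'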
class LazyStream(six.Iterator):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterator that yields a bytestring each time it
is advanced.
"""
self._producer = producer
self._empty = False
self._leftover = b''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = self._remaining if size is None else size
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = next(self)
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = b''.join(parts())
return out
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
This procedure just returns whatever chunk the iterator conveniently
returns. Useful to avoid unnecessary bookkeeping if performance is an
issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b''
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = b''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousMultipartForm(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
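# Illustrative round trip of the read/unget contract (added for this
# document): bytes pushed back with unget() are returned by the next read()
# and rewind tell().
def _example_lazystream():
    stream = LazyStream(iter([b'abcdef']))
    assert stream.read(4) == b'abcd' and stream.tell() == 4
    stream.unget(b'cd')  # rewinds the position to 2
    assert stream.read() == b'cdef'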
class ChunkIter(six.Iterator):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class InterBoundaryIter(six.Iterator):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(six.Iterator):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
The future calls to next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Probe the stream with a one-byte read so an already-exhausted stream
# is detected up front; the byte is pushed back immediately.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof=False):
"""
Finds a multipart boundary in data.
If no boundary exists in the data, None is returned. Otherwise, a tuple
containing the following indices is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = data.find(self._boundary)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end - 1)
if data[last:last + 1] == b'\n':
end -= 1
last = max(0, end - 1)
if data[last:last + 1] == b'\r':
end -= 1
return end, next
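# Illustrative check (added for this document): the CRLF just before the
# boundary is trimmed from the returned end index, while the "next" index
# points right past the boundary bytes.
def _example_find_boundary():
    it = BoundaryIter(LazyStream(iter([b'data\r\n--sep--'])), b'--sep')
    assert it._find_boundary(b'data\r\n--sep--') == (4, 11)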
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the index of the first of these four bytes, so we'll
# need to consume them later to prevent them from polluting
# the payload.
header_end = chunk.find(b'\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b'\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b'--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value.
Input (line): bytes, output: unicode for key/name, bytes for value which
will be decoded later
"""
plist = _parse_header_params(b';' + line)
key = plist.pop(0).lower().decode('ascii')
pdict = {}
for p in plist:
i = p.find(b'=')
if i >= 0:
has_encoding = False
name = p[:i].strip().lower().decode('ascii')
if name.endswith('*'):
# Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
# http://tools.ietf.org/html/rfc2231#section-4
name = name[:-1]
if p.count(b"'") == 2:
has_encoding = True
value = p[i + 1:].strip()
if has_encoding:
encoding, lang, value = value.split(b"'")
if six.PY3:
value = unquote(value.decode(), encoding=encoding.decode())
else:
value = unquote(value).decode(encoding)
if len(value) >= 2 and value[:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
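# Illustrative (added for this document): parsing a Content-Disposition
# value line; the key comes back as unicode, parameter values as bytes.
def _example_parse_header():
    key, pdict = parse_header(b'form-data; name="title"; filename="a.txt"')
    assert key == 'form-data'
    assert pdict == {'name': b'title', 'filename': b'a.txt'}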
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| mit |
ericzundel/pants | tests/python/pants_test/engine/legacy/test_address_mapper.py | 1 | 7395 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
import mock
from pants.base.specs import SiblingAddresses, SingleAddress
from pants.bin.engine_initializer import EngineInitializer
from pants.build_graph.address import Address
from pants.build_graph.address_mapper import AddressMapper
from pants.engine.legacy.address_mapper import LegacyAddressMapper
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_file_dump, safe_mkdir
from pants_test.engine.util import init_native
class LegacyAddressMapperTest(unittest.TestCase):
_native = init_native()
def create_build_files(self, build_root):
# Create BUILD files
# build_root:
# BUILD
# BUILD.other
# dir_a:
# BUILD
# BUILD.other
# subdir:
# BUILD
# dir_b:
# BUILD
dir_a = os.path.join(build_root, 'dir_a')
dir_b = os.path.join(build_root, 'dir_b')
dir_a_subdir = os.path.join(dir_a, 'subdir')
safe_mkdir(dir_a)
safe_mkdir(dir_b)
safe_mkdir(dir_a_subdir)
safe_file_dump(os.path.join(build_root, 'BUILD'), 'target(name="a")\ntarget(name="b")')
safe_file_dump(os.path.join(build_root, 'BUILD.other'), 'target(name="c")')
safe_file_dump(os.path.join(dir_a, 'BUILD'), 'target(name="a")\ntarget(name="b")')
safe_file_dump(os.path.join(dir_a, 'BUILD.other'), 'target(name="c")')
safe_file_dump(os.path.join(dir_b, 'BUILD'), 'target(name="a")')
safe_file_dump(os.path.join(dir_a_subdir, 'BUILD'), 'target(name="a")')
def create_address_mapper(self, build_root):
scheduler, engine, _, _ = EngineInitializer.setup_legacy_graph([], build_root=build_root, native=self._native)
return LegacyAddressMapper(scheduler, engine, build_root)
def test_is_valid_single_address(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
self.assertFalse(mapper.is_valid_single_address(SingleAddress('dir_a', 'foo')))
self.assertTrue(mapper.is_valid_single_address(SingleAddress('dir_a', 'a')))
with self.assertRaises(TypeError):
mapper.is_valid_single_address('foo')
def test_scan_build_files(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
build_files = mapper.scan_build_files('')
self.assertEqual(build_files,
{'BUILD', 'BUILD.other',
'dir_a/BUILD', 'dir_a/BUILD.other',
'dir_b/BUILD', 'dir_a/subdir/BUILD'})
build_files = mapper.scan_build_files('dir_a/subdir')
self.assertEqual(build_files, {'dir_a/subdir/BUILD'})
def test_scan_build_files_edge_cases(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
# A non-existent dir.
build_files = mapper.scan_build_files('foo')
self.assertEqual(build_files, set())
# A dir with no BUILD files.
safe_mkdir(os.path.join(build_root, 'empty'))
build_files = mapper.scan_build_files('empty')
self.assertEqual(build_files, set())
def test_is_declaring_file(self):
scheduler = mock.Mock()
mapper = LegacyAddressMapper(scheduler, None, '')
self.assertTrue(mapper.is_declaring_file(Address('path', 'name'), 'path/BUILD'))
self.assertTrue(mapper.is_declaring_file(Address('path', 'name'), 'path/BUILD.suffix'))
self.assertFalse(mapper.is_declaring_file(Address('path', 'name'), 'path/not_a_build_file'))
self.assertFalse(mapper.is_declaring_file(Address('path', 'name'), 'differing-path/BUILD'))
def test_addresses_in_spec_path(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
addresses = mapper.addresses_in_spec_path('dir_a')
self.assertEqual(addresses,
{Address('dir_a', 'a'), Address('dir_a', 'b'), Address('dir_a', 'c')})
def test_addresses_in_spec_path_no_dir(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
with self.assertRaises(AddressMapper.BuildFileScanError):
mapper.addresses_in_spec_path('foo')
# TODO: https://github.com/pantsbuild/pants/issues/4025
# self.assertIn('Directory "foo" does not exist.', str(cm.exception))
def test_addresses_in_spec_path_no_build_files(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
safe_mkdir(os.path.join(build_root, 'foo'))
mapper = self.create_address_mapper(build_root)
with self.assertRaises(AddressMapper.BuildFileScanError):
mapper.addresses_in_spec_path('foo')
# TODO: https://github.com/pantsbuild/pants/issues/4025
# self.assertIn('does not contain build files.', str(cm.exception))
def test_scan_specs(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
addresses = mapper.scan_specs([SingleAddress('dir_a', 'a'), SiblingAddresses('')])
self.assertEqual(addresses,
{Address('', 'a'), Address('', 'b'), Address('', 'c'), Address('dir_a', 'a')})
def test_scan_specs_bad_spec(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
with self.assertRaises(AddressMapper.BuildFileScanError):
mapper.scan_specs([SingleAddress('dir_a', 'd')])
# TODO: https://github.com/pantsbuild/pants/issues/4025
# self.assertIn('not found in namespace dir_a for name "d".', str(cm.exception))
def test_scan_addresses(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
addresses = mapper.scan_addresses()
self.assertEqual(addresses,
{Address('', 'a'), Address('', 'b'), Address('', 'c'),
Address('dir_a', 'a'), Address('dir_a', 'b'), Address('dir_a', 'c'),
Address('dir_b', 'a'), Address('dir_a/subdir', 'a')})
def test_scan_addresses_with_root_specified(self):
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
addresses = mapper.scan_addresses(os.path.join(build_root, 'dir_a'))
self.assertEqual(addresses,
{Address('dir_a', 'a'), Address('dir_a', 'b'), Address('dir_a', 'c'),
Address('dir_a/subdir', 'a')})
def test_scan_addresses_bad_dir(self):
# scan_addresses() should not raise an error.
with temporary_dir() as build_root:
self.create_build_files(build_root)
mapper = self.create_address_mapper(build_root)
addresses = mapper.scan_addresses(os.path.join(build_root, 'foo'))
self.assertEqual(addresses, set())
| apache-2.0 |
fzalkow/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
and others under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing step. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3 # use 3 bins for calibration_curve as we have 3 clusters here
# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
centers=centers, shuffle=False, random_state=42)
y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])
# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)
# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train) # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]
# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]
print("Brier scores: (the smaller the better)")
clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)
clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)
clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)
###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
this_X = X_train[y_train == this_y]
this_sw = sw_train[y_train == this_y]
plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
y_test[order].reshape(25, -1).mean(1),
'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
"(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")
plt.show()
| bsd-3-clause |
GbalsaC/bitnamiP | common/lib/xmodule/xmodule/modulestore/exceptions.py | 120 | 2736 | """
Exceptions thrown by KeyStore objects
"""
class ItemNotFoundError(Exception):
pass
class ItemWriteConflictError(Exception):
pass
class InsufficientSpecificationError(Exception):
pass
class OverSpecificationError(Exception):
pass
class InvalidLocationError(Exception):
pass
class NoPathToItem(Exception):
pass
class ReferentialIntegrityError(Exception):
"""
An incorrect pointer to an object exists. For example, 2 parents point to the same child, an
xblock points to a nonexistent child (which probably raises ItemNotFoundError instead depending
on context).
"""
pass
class DuplicateItemError(Exception):
"""
Attempted to create an item which already exists.
"""
def __init__(self, element_id, store=None, collection=None):
super(DuplicateItemError, self).__init__()
self.element_id = element_id
self.store = store
self.collection = collection
def __str__(self, *args, **kwargs):
"""
Print info about what's duplicated
"""
return "{store}[{collection}] already has {element_id} ({exception})".format(
store=self.store,
collection=self.collection,
element_id=self.element_id,
exception=Exception.__str__(self, *args, **kwargs),
)
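# Illustrative (added for this document; the ids are sample values):
#   str(DuplicateItemError('block1', store='split', collection='structures'))
#   == "split[structures] already has block1 ()"
# (the trailing parentheses are empty because __init__ passes no message
# on to Exception).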
class VersionConflictError(Exception):
"""
The caller asked for either draft or published head and gave a version which conflicted with it.
"""
def __init__(self, requestedLocation, currentHeadVersionGuid):
super(VersionConflictError, self).__init__(u'Requested {}, but current head is {}'.format(
requestedLocation,
currentHeadVersionGuid
))
class DuplicateCourseError(Exception):
"""
An attempt to create a course whose id duplicates an existing course's
"""
def __init__(self, course_id, existing_entry):
"""
existing_entry will have the who, when, and other properties of the existing entry
"""
super(DuplicateCourseError, self).__init__(
u'Cannot create course {}, which duplicates {}'.format(course_id, existing_entry)
)
self.course_id = course_id
self.existing_entry = existing_entry
class InvalidBranchSetting(Exception):
"""
Raised when the process' branch setting did not match the required setting for the attempted operation on a store.
"""
def __init__(self, expected_setting, actual_setting):
super(InvalidBranchSetting, self).__init__(u"Invalid branch: expected {} but got {}".format(expected_setting, actual_setting))
self.expected_setting = expected_setting
self.actual_setting = actual_setting
| agpl-3.0 |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/v8/tools/testrunner/local/commands.py | 7 | 4293 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import subprocess
import sys
from threading import Timer
from ..local import utils
from ..objects import output
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = \
ctypes.windll.kernel32.SetErrorMode(mode) #@UndefinedVariable
except ImportError:
pass
return prev_error_mode
def RunProcess(verbose, timeout, args, **rest):
if verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
popen_args = subprocess.list2cmdline(args)
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
try:
process = subprocess.Popen(
args=popen_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**rest
)
except Exception as e:
sys.stderr.write("Error executing: %s\n" % popen_args)
raise e
if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
Win32SetErrorMode(prev_error_mode)
def kill_process(process, timeout_result):
timeout_result[0] = True
try:
if utils.IsWindows():
if verbose:
print "Attempting to kill process %d" % process.pid
sys.stdout.flush()
tk = subprocess.Popen(
'taskkill /T /F /PID %d' % process.pid,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = tk.communicate()
if verbose:
print "Taskkill results for %d" % process.pid
print stdout
print stderr
print "Return code: %d" % tk.returncode
sys.stdout.flush()
else:
process.kill()
except OSError:
sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
# Pseudo object to communicate with timer thread.
timeout_result = [False]
timer = Timer(timeout, kill_process, [process, timeout_result])
timer.start()
stdout, stderr = process.communicate()
timer.cancel()
return output.Output(
process.returncode,
timeout_result[0],
stdout.decode('utf-8', 'replace').encode('utf-8'),
stderr.decode('utf-8', 'replace').encode('utf-8'),
process.pid,
)
def Execute(args, verbose=False, timeout=None):
args = [ c for c in args if c != "" ]
return RunProcess(verbose, timeout, args=args)
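# Illustrative usage (added for this document; the command line is a sample
# and the attribute names assume the fields populated by ..objects.output):
#   result = Execute(['python', '-c', 'print 42'], verbose=True, timeout=10)
#   result.exit_code, result.timed_out, result.stdout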
| gpl-3.0 |
TeamEOS/external_chromium_org | tools/deep_memory_profiler/lib/symbol.py | 99 | 7171 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
_BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
_FIND_RUNTIME_SYMBOLS_PATH = os.path.join(_BASE_PATH,
os.pardir,
'find_runtime_symbols')
_TOOLS_LINUX_PATH = os.path.join(_BASE_PATH,
os.pardir,
'linux')
sys.path.append(_FIND_RUNTIME_SYMBOLS_PATH)
sys.path.append(_TOOLS_LINUX_PATH)
import find_runtime_symbols
import prepare_symbol_info
import procfs # pylint: disable=W0611,F0401
LOGGER = logging.getLogger('dmprof')
FUNCTION_SYMBOLS = find_runtime_symbols.FUNCTION_SYMBOLS
SOURCEFILE_SYMBOLS = find_runtime_symbols.SOURCEFILE_SYMBOLS
TYPEINFO_SYMBOLS = find_runtime_symbols.TYPEINFO_SYMBOLS
class SymbolDataSources(object):
"""Manages symbol data sources in a process.
The symbol data sources consist of maps (/proc/<pid>/maps), nm, readelf and
so on. They are collected into a directory '|prefix|.symmap' from the binary
files by 'prepare()' with tools/find_runtime_symbols/prepare_symbol_info.py.
Binaries are not mandatory to profile. The prepared data sources work in
place of the binary even if the binary has been overwritten with another
binary.
Note that loading the symbol data sources takes a long time. They are often
very big. So, the 'dmprof' profiler is designed to use 'SymbolMappingCache'
which caches actually used symbols.
"""
def __init__(self, prefix, alternative_dirs=None):
self._prefix = prefix
self._prepared_symbol_data_sources_path = None
self._loaded_symbol_data_sources = None
self._alternative_dirs = alternative_dirs or {}
def prepare(self):
"""Prepares symbol data sources by extracting mapping from a binary.
The prepared symbol data sources are stored in a directory. The directory
name is stored in |self._prepared_symbol_data_sources_path|.
Returns:
True if succeeded.
"""
LOGGER.info('Preparing symbol mapping...')
self._prepared_symbol_data_sources_path, used_tempdir = (
prepare_symbol_info.prepare_symbol_info(
self._prefix + '.maps',
output_dir_path=self._prefix + '.symmap',
alternative_dirs=self._alternative_dirs,
use_tempdir=True,
use_source_file_name=True))
if self._prepared_symbol_data_sources_path:
LOGGER.info(' Prepared symbol mapping.')
if used_tempdir:
LOGGER.warn(' Using a temporary directory for symbol mapping.')
LOGGER.warn(' Delete it by yourself.')
LOGGER.warn(' Or, move the directory by yourself to use it later.')
return True
else:
LOGGER.warn(' Failed to prepare symbol mapping.')
return False
def get(self):
"""Returns the prepared symbol data sources.
Returns:
The prepared symbol data sources. None if failed.
"""
if not self._prepared_symbol_data_sources_path and not self.prepare():
return None
if not self._loaded_symbol_data_sources:
LOGGER.info('Loading symbol mapping...')
self._loaded_symbol_data_sources = (
find_runtime_symbols.RuntimeSymbolsInProcess.load(
self._prepared_symbol_data_sources_path))
return self._loaded_symbol_data_sources
def path(self):
"""Returns the path of the prepared symbol data sources if possible."""
if not self._prepared_symbol_data_sources_path and not self.prepare():
return None
return self._prepared_symbol_data_sources_path
class SymbolFinder(object):
"""Finds corresponding symbols from addresses.
This class does only 'find()' symbols from a specified |address_list|.
It is introduced to make a finder mockable.
"""
def __init__(self, symbol_type, symbol_data_sources):
self._symbol_type = symbol_type
self._symbol_data_sources = symbol_data_sources
def find(self, address_list):
return find_runtime_symbols.find_runtime_symbols(
self._symbol_type, self._symbol_data_sources.get(), address_list)
class SymbolMappingCache(object):
"""Caches mapping from actually used addresses to symbols.
'update()' updates the cache from the original symbol data sources via
'SymbolFinder'. Symbols can be looked up by the method 'lookup()'.
"""
def __init__(self):
self._symbol_mapping_caches = {
FUNCTION_SYMBOLS: {},
SOURCEFILE_SYMBOLS: {},
TYPEINFO_SYMBOLS: {},
}
def update(self, symbol_type, bucket_set, symbol_finder, cache_f):
"""Updates symbol mapping cache on memory and in a symbol cache file.
It reads cached symbol mapping from a symbol cache file |cache_f| if it
exists. Unresolved addresses are then resolved and added to the cache
both on memory and in the symbol cache file with using 'SymbolFinder'.
A cache file is formatted as follows:
<Address> <Symbol>
<Address> <Symbol>
<Address> <Symbol>
...
Args:
symbol_type: A type of symbols to update. It should be one of
FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS and TYPEINFO_SYMBOLS.
bucket_set: A BucketSet object.
symbol_finder: A SymbolFinder object to find symbols.
cache_f: A readable and writable IO object of the symbol cache file.
"""
cache_f.seek(0, os.SEEK_SET)
self._load(cache_f, symbol_type)
unresolved_addresses = sorted(
address for address in bucket_set.iter_addresses(symbol_type)
if address not in self._symbol_mapping_caches[symbol_type])
if not unresolved_addresses:
LOGGER.info('No need to resolve any more addresses.')
return
cache_f.seek(0, os.SEEK_END)
LOGGER.info('Loading %d unresolved addresses.' %
len(unresolved_addresses))
symbol_dict = symbol_finder.find(unresolved_addresses)
for address, symbol in symbol_dict.iteritems():
stripped_symbol = symbol.strip() or '?'
self._symbol_mapping_caches[symbol_type][address] = stripped_symbol
cache_f.write('%x %s\n' % (address, stripped_symbol))
def lookup(self, symbol_type, address):
"""Looks up a symbol for a given |address|.
Args:
symbol_type: A type of symbols to update. It should be one of
FUNCTION_SYMBOLS, SOURCEFILE_SYMBOLS and TYPEINFO_SYMBOLS.
address: An integer that represents an address.
Returns:
A string that represents a symbol.
"""
return self._symbol_mapping_caches[symbol_type].get(address)
def _load(self, cache_f, symbol_type):
try:
for line in cache_f:
items = line.rstrip().split(None, 1)
if len(items) == 1:
items.append('??')
self._symbol_mapping_caches[symbol_type][int(items[0], 16)] = items[1]
LOGGER.info('Loaded %d entries from symbol cache.' %
len(self._symbol_mapping_caches[symbol_type]))
except IOError as e:
LOGGER.info('The symbol cache file is invalid: %s' % e)
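# Illustrative round trip (added for this document): exercising the cache
# file format described in update()'s docstring with in-memory stubs
# instead of real profile data.
def _example_symbol_cache():
    import io

    class _StubBucketSet(object):
        def iter_addresses(self, symbol_type):
            return iter([0x1000])

    class _StubFinder(object):
        def find(self, addresses):
            return {0x1000: 'main'}

    cache = SymbolMappingCache()
    cache_f = io.BytesIO()
    cache.update(FUNCTION_SYMBOLS, _StubBucketSet(), _StubFinder(), cache_f)
    assert cache.lookup(FUNCTION_SYMBOLS, 0x1000) == 'main'
    assert cache_f.getvalue() == b'1000 main\n'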
| bsd-3-clause |
gorczynski/dotfiles | vim/bundle/powerline/tests/test_listers.py | 8 | 5049 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import powerline.listers.i3wm as i3wm
from tests.lib import Args, replace_attr, Pl
from tests import TestCase
class TestI3WM(TestCase):
@staticmethod
def get_workspaces():
return iter([
{'name': '1: w1', 'output': 'LVDS1', 'focused': False, 'urgent': False, 'visible': False},
{'name': '2: w2', 'output': 'LVDS1', 'focused': False, 'urgent': False, 'visible': True},
{'name': '3: w3', 'output': 'HDMI1', 'focused': False, 'urgent': True, 'visible': True},
{'name': '4: w4', 'output': 'DVI01', 'focused': True, 'urgent': True, 'visible': True},
])
@staticmethod
def get_outputs(pl):
return iter([
{'name': 'LVDS1'},
{'name': 'HDMI1'},
{'name': 'DVI01'},
])
def test_output_lister(self):
pl = Pl()
with replace_attr(i3wm, 'get_connected_xrandr_outputs', self.get_outputs):
self.assertEqual(
list(i3wm.output_lister(pl=pl, segment_info={'a': 1})),
[
({'a': 1, 'output': 'LVDS1'}, {'draw_inner_divider': None}),
({'a': 1, 'output': 'HDMI1'}, {'draw_inner_divider': None}),
({'a': 1, 'output': 'DVI01'}, {'draw_inner_divider': None}),
]
)
def test_workspace_lister(self):
pl = Pl()
with replace_attr(i3wm, 'get_i3_connection', lambda: Args(get_workspaces=self.get_workspaces)):
self.assertEqual(
list(i3wm.workspace_lister(pl=pl, segment_info={'a': 1})),
[
({
'a': 1,
'output': 'LVDS1',
'workspace': {
'name': '1: w1',
'focused': False,
'urgent': False,
'visible': False
}
}, {'draw_inner_divider': None}),
({
'a': 1,
'output': 'LVDS1',
'workspace': {
'name': '2: w2',
'focused': False,
'urgent': False,
'visible': True
}
}, {'draw_inner_divider': None}),
({
'a': 1,
'output': 'HDMI1',
'workspace': {
'name': '3: w3',
'focused': False,
'urgent': True,
'visible': True
}
}, {'draw_inner_divider': None}),
({
'a': 1,
'output': 'DVI01',
'workspace': {
'name': '4: w4',
'focused': True,
'urgent': True,
'visible': True
}
}, {'draw_inner_divider': None}),
]
)
self.assertEqual(
list(i3wm.workspace_lister(pl=pl, segment_info={'a': 1}, output='LVDS1')),
[
({
'a': 1,
'output': 'LVDS1',
'workspace': {
'name': '1: w1',
'focused': False,
'urgent': False,
'visible': False
}
}, {'draw_inner_divider': None}),
({
'a': 1,
'output': 'LVDS1',
'workspace': {
'name': '2: w2',
'focused': False,
'urgent': False,
'visible': True
}
}, {'draw_inner_divider': None}),
]
)
self.assertEqual(
list(i3wm.workspace_lister(
pl=pl,
segment_info={'a': 1, 'output': 'LVDS1'}
)),
[
({
'a': 1,
'output': 'LVDS1',
'workspace': {
'name': '1: w1',
'focused': False,
'urgent': False,
'visible': False
}
}, {'draw_inner_divider': None}),
({
'a': 1,
'output': 'LVDS1',
'workspace': {
'name': '2: w2',
'focused': False,
'urgent': False,
'visible': True
}
}, {'draw_inner_divider': None}),
]
)
self.assertEqual(
list(i3wm.workspace_lister(
pl=pl,
segment_info={'a': 1, 'output': 'LVDS1'},
output=False
)),
[
({
'a': 1,
'output': 'LVDS1',
'workspace': {
'name': '1: w1',
'focused': False,
'urgent': False,
'visible': False
}
}, {'draw_inner_divider': None}),
({
'a': 1,
'output': 'LVDS1',
'workspace': {
'name': '2: w2',
'focused': False,
'urgent': False,
'visible': True
}
}, {'draw_inner_divider': None}),
({
'a': 1,
'output': 'HDMI1',
'workspace': {
'name': '3: w3',
'focused': False,
'urgent': True,
'visible': True
}
}, {'draw_inner_divider': None}),
({
'a': 1,
'output': 'DVI01',
'workspace': {
'name': '4: w4',
'focused': True,
'urgent': True,
'visible': True
}
}, {'draw_inner_divider': None}),
]
)
self.assertEqual(
list(i3wm.workspace_lister(
pl=pl,
segment_info={'a': 1},
only_show=['focused', 'urgent']
)),
[
({
'a': 1,
'output': 'HDMI1',
'workspace': {
'name': '3: w3',
'focused': False,
'urgent': True,
'visible': True
}
}, {'draw_inner_divider': None}),
({
'a': 1,
'output': 'DVI01',
'workspace': {
'name': '4: w4',
'focused': True,
'urgent': True,
'visible': True
}
}, {'draw_inner_divider': None}),
]
)
if __name__ == '__main__':
from tests import main
main()
| gpl-3.0 |
GhostThrone/django | tests/string_lookup/models.py | 281 | 1533 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Foo(models.Model):
name = models.CharField(max_length=50)
friend = models.CharField(max_length=50, blank=True)
def __str__(self):
return "Foo %s" % self.name
@python_2_unicode_compatible
class Bar(models.Model):
name = models.CharField(max_length=50)
normal = models.ForeignKey(Foo, models.CASCADE, related_name='normal_foo')
fwd = models.ForeignKey("Whiz", models.CASCADE)
back = models.ForeignKey("Foo", models.CASCADE)
def __str__(self):
return "Bar %s" % self.place.name
@python_2_unicode_compatible
class Whiz(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Whiz %s" % self.name
@python_2_unicode_compatible
class Child(models.Model):
parent = models.OneToOneField('Base', models.CASCADE)
name = models.CharField(max_length=50)
def __str__(self):
return "Child %s" % self.name
@python_2_unicode_compatible
class Base(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Base %s" % self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=50)
text = models.TextField()
submitted_from = models.GenericIPAddressField(blank=True, null=True)
def __str__(self):
return "Article %s" % self.name
| bsd-3-clause |
y0sh1/iozone-results-comparator | src/regression_line.py | 8 | 2379 | #!/usr/bin/python
# Copyright (C) 2013
# Adam Okuliar aokuliar at redhat dot com
# Jiri Hladky hladky dot jiri at gmail dot com
# Petr Benas petrbenas at gmail dot com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
from scipy import stats
class RegressionLine:
def __init__(self):
self.points = [] # vector of points to be regressed
self.xVals = []
self.yVals = []
# computed attributes
self.slope = 0
self.stdError = 0
self.confIntMax = 0
self.confIntMin = 0
def addPoint(self, x, y):
self.points.append((x, y))
self.xVals.append(x)
self.yVals.append(y)
def computeSlope(self):
x = numpy.array(self.xVals)
y = numpy.array(self.yVals)
AverageX = numpy.mean(self.xVals)
# The slope a of the orthogonal (total least squares) regression
# line through the origin solves:
#   a^2 * Sum[xi*yi] + a * Sum[xi^2 - yi^2] - Sum[xi*yi] = 0
A = numpy.sum(x*y)
B = numpy.sum([x**2 - y**2])
discriminant = numpy.sqrt( B**2 + 4 * A**2)
a = ( -B + discriminant ) / ( 2 * A )
self.slope = a
if len(self.xVals) == 1:
self.stdError = 0
self.confIntMax = self.confIntMin = a
return
# distance of points from line with slope=a
D = numpy.abs(a*x-y) / numpy.sqrt(a**2 + 1)
# standard error of a
a_se = numpy.sqrt( numpy.sum(D**2) / numpy.sum((x - AverageX)**2) / (len(x) - 1) )
# 90% confidence interval
h = a_se * stats.t._ppf((1+0.90)/2., len(x)-1)
self.stdError = a_se
self.confIntMax = a + h
self.confIntMin = a - h
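# --- Illustrative sanity check (added; not part of the original file) ---
# Points lying exactly on y = 2x satisfy the quadratic above,
# a^2 * Sum[xi yi] + a * Sum[xi^2 - yi^2] - Sum[xi yi] = 0, with a = 2,
# so computeSlope() should recover slope 2 with zero standard error.
def _slope_sanity_check():
    rl = RegressionLine()
    for x in (1.0, 2.0, 3.0):
        rl.addPoint(x, 2.0 * x)
    rl.computeSlope()
    assert abs(rl.slope - 2.0) < 1e-9
    assert rl.stdError == 0.0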
if __name__ == '__main__':
print 'Try running iozone_results_comparator.py'
| gpl-3.0 |
liangazhou/django-rdp | packages/PyDev/plugins/org.python.pydev.jython_4.4.0.201510052309/Lib/unittest/main.py | 115 | 9083 | """Unittest main program"""
import sys
import os
import types
from . import loader, runner
from .signals import installHandler
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s module.TestClass - run tests from module.TestClass
%(progName)s module.Class.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = USAGE_FROM_MODULE
# defaults for testing
failfast = catchbreak = buffer = progName = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=loader.defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None):
if isinstance(module, basestring):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print msg
usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
'buffer': ''}
if self.failfast != False:
usage['failfast'] = FAILFAST
if self.catchbreak != False:
usage['catchbreak'] = CATCHBREAK
if self.buffer != False:
usage['buffer'] = BUFFEROUTPUT
print self.USAGE % usage
sys.exit(2)
def parseArgs(self, argv):
if len(argv) > 1 and argv[1].lower() == 'discover':
self._do_discovery(argv[2:])
return
import getopt
long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
try:
options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
for opt, value in options:
if opt in ('-h','-H','--help'):
self.usageExit()
if opt in ('-q','--quiet'):
self.verbosity = 0
if opt in ('-v','--verbose'):
self.verbosity = 2
if opt in ('-f','--failfast'):
if self.failfast is None:
self.failfast = True
# Should this raise an exception if -f is not valid?
if opt in ('-c','--catch'):
if self.catchbreak is None:
self.catchbreak = True
# Should this raise an exception if -c is not valid?
if opt in ('-b','--buffer'):
if self.buffer is None:
self.buffer = True
# Should this raise an exception if -b is not valid?
if len(args) == 0 and self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif len(args) > 0:
self.testNames = args
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
else:
self.testNames = (self.defaultTest,)
self.createTests()
except getopt.error, msg:
self.usageExit(msg)
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _do_discovery(self, argv, Loader=None):
if Loader is None:
Loader = lambda: self.testLoader
# handle command line args for test discovery
self.progName = '%s discover' % self.progName
import optparse
parser = optparse.OptionParser()
parser.prog = self.progName
parser.add_option('-v', '--verbose', dest='verbose', default=False,
help='Verbose output', action='store_true')
if self.failfast != False:
parser.add_option('-f', '--failfast', dest='failfast', default=False,
help='Stop on first fail or error',
action='store_true')
if self.catchbreak != False:
parser.add_option('-c', '--catch', dest='catchbreak', default=False,
help='Catch ctrl-C and display results so far',
action='store_true')
if self.buffer != False:
parser.add_option('-b', '--buffer', dest='buffer', default=False,
help='Buffer stdout and stderr during tests',
action='store_true')
parser.add_option('-s', '--start-directory', dest='start', default='.',
help="Directory to start discovery ('.' default)")
parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
help="Pattern to match tests ('test*.py' default)")
parser.add_option('-t', '--top-level-directory', dest='top', default=None,
help='Top level directory of project (defaults to start directory)')
options, args = parser.parse_args(argv)
if len(args) > 3:
self.usageExit()
for name, value in zip(('start', 'pattern', 'top'), args):
setattr(options, name, value)
# only set options from the parsing here
# if they weren't set explicitly in the constructor
if self.failfast is None:
self.failfast = options.failfast
if self.catchbreak is None:
self.catchbreak = options.catchbreak
if self.buffer is None:
self.buffer = options.buffer
if options.verbose:
self.verbosity = 2
start_dir = options.start
pattern = options.pattern
top_level_dir = options.top
loader = Loader()
self.test = loader.discover(start_dir, pattern, top_level_dir)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, (type, types.ClassType)):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
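# --- Usage sketch (added for illustration; the module name 'mytests' is
# hypothetical) ---
# The alias above enables the conventional entry point in a test module:
#
#     if __name__ == '__main__':
#         unittest.main()        # same as TestProgram(module='__main__')
#
# Passing exit=False returns the TestProgram instead of calling sys.exit(),
# so the TestResult can be inspected programmatically:
#
#     result = unittest.main(module='mytests', exit=False).result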
| apache-2.0 |
volcanoauthors/volcano-ci-tests | src/gn/toolchain/win/recursive_mirror.py | 2 | 1029 | #!/usr/bin/env python
# Copyright (c) 2017-2018 the Volcano Authors. All rights reserved.
# Licensed under the GPLv3.
import os
import shutil
import stat
import sys
def main(source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
def _on_error(fn, path, excinfo):
# The operation failed, possibly because the file is set to
# read-only. If that's why, make it writable and try the op again.
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWRITE)
fn(path)
shutil.rmtree(dest, onerror=_on_error)
else:
if not os.access(dest, os.W_OK):
# Attempt to make the file writable before deleting it.
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
# "touch" the file (windows mtime bug in shutil.copy2).
os.utime(dest, None)
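# --- Hedged self-test (added for illustration; paths are temporary and
# hypothetical) ---
# Demonstrates the "rm -rf out && cp -af in out" semantics: mirroring the
# same source twice replaces the destination in place.
def _self_test():
  import tempfile
  src = tempfile.mkdtemp()
  dst = os.path.join(tempfile.mkdtemp(), 'mirror')
  with open(os.path.join(src, 'a.txt'), 'w') as f:
    f.write('hello')
  main(src, dst)   # first call copies the tree
  main(src, dst)   # second call removes dst, then copies it again
  with open(os.path.join(dst, 'a.txt')) as f:
    assert f.read() == 'hello'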
if __name__ == '__main__':
sys.exit(main(*sys.argv[1:]))
| gpl-3.0 |
kvar/ansible | hacking/build_library/build_ansible/command_plugins/porting_guide.py | 4 | 3254 | # coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
import os.path
import sys
from jinja2 import Environment, DictLoader
# Pylint doesn't understand Python3 namespace modules.
from ..commands import Command # pylint: disable=relative-beyond-top-level
PORTING_GUIDE_TEMPLATE = """
.. _porting_{{ ver }}_guide:
*************************
Ansible {{ ver }} Porting Guide
*************************
This section discusses the behavioral changes between Ansible {{ prev_ver }} and Ansible {{ ver }}.
It is intended to assist in updating your playbooks, plugins and other parts of your Ansible infrastructure so they will work with this version of Ansible.
We suggest you read this page along with `Ansible Changelog for {{ ver }} <https://github.com/ansible/ansible/blob/devel/changelogs/CHANGELOG-v{{ ver }}.rst>`_ to understand what updates you may need to make.
This document is part of a collection on porting. The complete list of porting guides can be found at :ref:`porting guides <porting_guides>`.
.. contents:: Topics
Playbook
========
No notable changes
Command Line
============
No notable changes
Deprecated
==========
No notable changes
Modules
=======
No notable changes
Modules removed
---------------
The following modules no longer exist:
* No notable changes
Deprecation notices
-------------------
No notable changes
Noteworthy module changes
-------------------------
No notable changes
Plugins
=======
No notable changes
Porting custom scripts
======================
No notable changes
Networking
==========
No notable changes
""" # noqa for E501 (line length).
# jinja2 is horrid about getting rid of extra newlines so we have to have a single line per
# paragraph for proper wrapping to occur
JINJA_ENV = Environment(
loader=DictLoader({'porting_guide': PORTING_GUIDE_TEMPLATE,
}),
extensions=['jinja2.ext.i18n'],
trim_blocks=True,
lstrip_blocks=True,
)
def generate_porting_guide(version):
template = JINJA_ENV.get_template('porting_guide')
version_list = version.split('.')
version_list[-1] = str(int(version_list[-1]) - 1)
previous_version = '.'.join(version_list)
content = template.render(ver=version, prev_ver=previous_version)
return content
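# --- Illustrative check (added; the version strings are examples only) ---
# The previous version is derived by decrementing the last component, so
# "2.8" renders against prev_ver "2.7" and "2.10" against "2.9".
def _demo_previous_version():
    for ver, prev in (("2.8", "2.7"), ("2.10", "2.9")):
        guide = generate_porting_guide(ver)
        assert "Ansible %s Porting Guide" % ver in guide
        assert "Ansible %s and Ansible %s" % (prev, ver) in guide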
def write_guide(version, guide_content):
filename = 'porting_guide_{0}.rst'.format(version)
with open(filename, 'w') as out_file:
out_file.write(guide_content)
class PortingGuideCommand(Command):
name = 'porting-guide'
@classmethod
def init_parser(cls, add_parser):
parser = add_parser(cls.name, description="Generate a fresh porting guide template")
parser.add_argument("--version", dest="version", type=str, required=True, action='store',
help="Version of Ansible to write the porting guide for")
@staticmethod
def main(args):
guide_content = generate_porting_guide(args.version)
write_guide(args.version, guide_content)
return 0
| gpl-3.0 |
bhavin04890/finaldashboard | modules/eden/vulnerability.py | 3 | 17256 | # -*- coding: utf-8 -*-
""" Sahana Eden Vulnerability Model
@copyright: 2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3VulnerabilityModel",
]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3VulnerabilityModel(S3Model):
"""
Vulnerability Management
"""
names = ["vulnerability_indicator",
"vulnerability_aggregated_indicator",
"vulnerability_data",
"vulnerability_resilience_id",
"vulnerability_ids",
"vulnerability_resilience",
]
resilience_pid = None # id of the resilience indicator
    indicator_pids = None # List of ids used to calculate the resilience indicator
def model(self):
T = current.T
db = current.db
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
super_link = self.super_link
# ---------------------------------------------------------------------
# Vulnerability Indicator
#
tablename = "vulnerability_indicator"
table = define_table(tablename,
super_link("parameter_id", "stats_parameter"),
Field("posn", "integer"),
Field("name",
label = T("Name")),
s3_comments("description",
label = T("Description")),
*s3_meta_fields()
)
# CRUD Strings
ADD_VULNERABILITY = T("Add Vulnerability Indicator")
crud_strings[tablename] = Storage(
title_create = ADD_VULNERABILITY,
title_display = T("Vulnerability Indicator Details"),
title_list = T("Vulnerability Indicators"),
title_update = T("Edit Vulnerability Indicator"),
title_search = T("Search Vulnerability Indicators"),
title_upload = T("Import Vulnerability Indicator"),
subtitle_create = T("Add New Vulnerability Indicator"),
label_list_button = T("List Vulnerability Indicators"),
label_create_button = ADD_VULNERABILITY,
msg_record_created = T("Vulnerability Indicator added"),
msg_record_modified = T("Vulnerability Indicator updated"),
msg_record_deleted = T("Vulnerability Indicator deleted"),
msg_list_empty = T("No vulnerability indicators currently defined"))
configure(tablename,
super_entity = "stats_parameter",
deduplicate = self.vulnerability_indicator_duplicate,
)
# ---------------------------------------------------------------------
# Vulnerability Aggregated Indicator
#
tablename = "vulnerability_aggregated_indicator"
table = define_table(tablename,
super_link("parameter_id", "stats_parameter"),
Field("name",
label = T("Name")),
s3_comments("description",
label = T("Description")),
*s3_meta_fields()
)
# CRUD Strings
ADD_VULNERABILITY = T("Add Vulnerability Aggregated Indicator")
crud_strings[tablename] = Storage(
title_create = ADD_VULNERABILITY,
title_display = T("Vulnerability Aggregated Indicator Details"),
title_list = T("Vulnerability Aggregated Indicators"),
title_update = T("Edit Vulnerability Aggregated Indicator"),
title_search = T("Search Vulnerability Aggregated Indicators"),
title_upload = T("Import Vulnerability Aggregated Indicator"),
subtitle_create = T("Add New Vulnerability Aggregated Indicator"),
label_list_button = T("List Vulnerability Aggregated Indicators"),
label_create_button = ADD_VULNERABILITY,
msg_record_created = T("Vulnerability Aggregated Indicator added"),
msg_record_modified = T("Vulnerability Aggregated Indicator updated"),
msg_record_deleted = T("Vulnerability Aggregated Indicator deleted"),
msg_list_empty = T("No vulnerability aggregated indicators currently defined"))
configure(tablename,
super_entity = "stats_parameter",
deduplicate = self.vulnerability_indicator_duplicate,
)
# ---------------------------------------------------------------------
# Vulnerability Data
#
tablename = "vulnerability_data"
table = define_table(tablename,
super_link("data_id", "stats_data"),
self.stats_param_id(
label = T("Indicator"),
requires = IS_ONE_OF(db, "stats_parameter.parameter_id",
self.stats_parameter_represent,
filterby="instance_type",
filter_opts=["vulnerability_indicator"],
orderby="stats_parameter.name",
sort=True)
),
self.gis_location_id(
widget = S3LocationAutocompleteWidget(),
requires = IS_LOCATION()
),
Field("value", "double",
label = T("Value")),
s3_date(),
# Unused but needed for the stats_data SE
Field("date_end", "date",
readable=False,
writable=False
),
self.stats_group_id(),
*s3_meta_fields()
)
# CRUD Strings
ADD_DATA = T("Add Vulnerability Data")
crud_strings[tablename] = Storage(
title_create = ADD_DATA,
title_display = T("Vulnerability Data Details"),
title_list = T("Vulnerability Data"),
title_update = T("Edit Vulnerability Data"),
title_search = T("Search Vulnerability Data"),
title_upload = T("Import Vulnerability Data"),
subtitle_create = T("Add New Vulnerability Data"),
label_list_button = T("List Vulnerability Data"),
label_create_button = ADD_DATA,
msg_record_created = T("Vulnerability Data added"),
msg_record_modified = T("Vulnerability Data updated"),
msg_record_deleted = T("Vulnerability Data deleted"),
msg_list_empty = T("No vulnerability data currently defined"))
configure(tablename,
super_entity = "stats_data",
deduplicate = self.vulnerability_data_duplicate,
requires_approval=True,
)
# ---------------------------------------------------------------------
# Pass model-global names to response.s3
#
return Storage(
vulnerability_resilience_id = self.vulnerability_resilience_id,
vulnerability_ids = self.vulnerability_ids,
vulnerability_resilience = self.vulnerability_resilience,
)
# -------------------------------------------------------------------------
def defaults(self):
""" Safe defaults if the module is disabled """
return Storage(
vulnerability_resilience_id = lambda i: [],
vulnerability_ids = lambda i: None,
)
# -------------------------------------------------------------------------
@staticmethod
def vulnerability_resilience_id():
"""
Return the parameter_id of the resilience indicator
"""
if S3VulnerabilityModel.resilience_pid is None:
# Get the parameter_id of the aggregated_indicator
table = current.s3db.vulnerability_aggregated_indicator
query = (table.uuid == "Resilience") & \
(table.deleted == False)
row = current.db(query).select(table.parameter_id,
limitby=(0, 1)).first()
try:
S3VulnerabilityModel.resilience_pid = row.parameter_id
except:
# DB not initialised
pass
return S3VulnerabilityModel.resilience_pid
# -------------------------------------------------------------------------
@staticmethod
def vulnerability_ids():
"""
Return a list of the parameter_id's that are to be used when
calculating the resilience indicator
"""
if S3VulnerabilityModel.indicator_pids is None:
table = current.s3db.vulnerability_indicator
query = (table.deleted == False)
rows = current.db(query).select(table.parameter_id)
S3VulnerabilityModel.indicator_pids = [i.parameter_id for i in rows]
return S3VulnerabilityModel.indicator_pids
# -------------------------------------------------------------------------
@staticmethod
def vulnerability_resilience(loc_level,
location_id,
resilience_pid,
indicator_pids,
date_period_start,
date_period_end,
use_location,
):
"""
Calculates the resilience held in the vulnerability_data table
for a specific location and time period.
            This is run asynchronously.
            Where appropriate, add test cases to modules/unit_tests/eden/stats.py
"""
db = current.db
s3db = current.s3db
vtable = s3db.vulnerability_data
stable = s3db.stats_aggregate
# Get the data from the vulnerability_data table
query = (vtable.deleted != True) & \
(vtable.approved_by != None) & \
(vtable.parameter_id.belongs(indicator_pids))
ward_count = 1
if use_location:
query &= (vtable.location_id == location_id)
else:
# Get all the child locations
child_locations = current.gis.get_children(location_id, loc_level)
child_ids = [row.id for row in child_locations]
ward_count = len(child_ids)
query &= (vtable.location_id.belongs(child_ids))
if date_period_end is None:
pass
elif date_period_end == "None":
date_period_end = None
else:
query &= (vtable.date <= date_period_end)
rows = db(query).select(vtable.parameter_id,
vtable.location_id,
vtable.value,
vtable.date,
orderby=(vtable.location_id,
vtable.parameter_id,
~vtable.date
)
)
# The query may return duplicate records for the same
# location+parameter: use the most recent, which because
# of the ordering will be the first
values = []
append = values.append
locations = []
new_location = locations.append
last_record = (0, 0)
for row in rows:
value = row.value
if not value:
continue
l = row.location_id
key = (l, row.parameter_id)
if last_record != key:
last_record = key
append(value)
if l not in locations:
new_location(l)
# Aggregate the values
values_len = len(values)
if not values_len:
return
import numpy
values_sum = sum(values)
values_min = min(values)
values_max = max(values)
values_avg = float(values_sum) / values_len
values_med = numpy.median(values)
values_mad = numpy.median([abs(v - values_med) for v in values])
reported_count = len(locations)
# Store Resilience value in the stats_aggregate table
query = (stable.location_id == location_id) & \
(stable.date == date_period_start) & \
(stable.parameter_id == resilience_pid)
record = db(query).select(stable.id,
limitby=(0, 1)).first()
if record:
# Update
db(query).update(date = date_period_start,
end_date = date_period_end,
reported_count = reported_count,
ward_count = ward_count,
min = values_min,
max = values_max,
mean = values_avg,
median = values_med,
mad = values_mad,
)
else:
# Insert new
id = stable.insert(agg_type = 4, # indicator
parameter_id = resilience_pid,
location_id = location_id,
date = date_period_start,
end_date = date_period_end,
reported_count = reported_count,
ward_count = ward_count,
min = values_min,
max = values_max,
mean = values_avg,
median = values_med,
mad = values_mad,
)
return
# -------------------------------------------------------------------------
@staticmethod
def vulnerability_indicator_duplicate(item):
""" Import item de-duplication """
if (item.tablename == "vulnerability_indicator") or \
(item.tablename == "vulnerability_aggregated_indicator"):
table = item.table
name = item.data.get("name", None)
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def vulnerability_data_duplicate(item):
""" Import item de-duplication """
if item.tablename == "vulnerability_data":
data = item.data
param = data.get("parameter_id", None)
location = data.get("location_id", None)
date = data.get("date", None)
table = item.table
query = (table.parameter_id == param) & \
(table.location_id == location) & \
(table.date == date)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# END =========================================================================
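# --- Illustrative sketch (added; not part of the Sahana Eden module) ---
# How vulnerability_resilience() aggregates values: for the hypothetical
# sample [2, 4, 4, 10] the stored statistics would be min=2, max=10,
# mean=5, median=4 and mad = median(|v - median|) = median([2,0,0,6]) = 1.
def _demo_aggregation():
    import numpy
    values = [2.0, 4.0, 4.0, 10.0]
    med = numpy.median(values)
    mad = numpy.median([abs(v - med) for v in values])
    assert (med, mad) == (4.0, 1.0)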
| mit |
shermanng10/superathletebuilder | env/lib/python2.7/site-packages/pip/download.py | 279 | 31936 | from __future__ import absolute_import
import cgi
import email.utils
import hashlib
import getpass
import json
import logging
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
try:
import ssl # noqa
HAS_TLS = True
except ImportError:
HAS_TLS = False
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
import pip
from pip.exceptions import InstallationError, HashMismatch
from pip.models import PyPI
from pip.utils import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
call_subprocess, ARCHIVE_EXTENSIONS)
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import indent_log
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
from pip.locations import write_delete_marker_file
from pip.vcs import vcs
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.requests.packages import urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.six.moves import xmlrpc_client
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url']
logger = logging.getLogger(__name__)
def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": pip.__version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
distro = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], platform.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
zip(["lib", "version"], platform.libc_ver()),
))
if libc:
distro["libc"] = libc
if distro:
data["distro"] = distro
if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
data["distro"] = {"name": "OS X", "version": platform.mac_ver()[0]}
if platform.system():
data.setdefault("system", {})["name"] = platform.system()
if platform.release():
data.setdefault("system", {})["release"] = platform.release()
if platform.machine():
data["cpu"] = platform.machine()
return "{data[installer][name]}/{data[installer][version]} {json}".format(
data=data,
json=json.dumps(data, separators=(",", ":"), sort_keys=True),
)
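# --- Hedged sketch (added for illustration; not part of pip itself) ---
# The returned string is "pip/<version>" followed by the metadata above
# serialized as JSON, e.g. (fields vary by platform):
#   pip/6.0.8 {"cpu":"x86_64","implementation":{"name":"CPython",...},...}
def _demo_user_agent():
    ua = user_agent()
    assert ua.startswith("pip/%s " % pip.__version__)
    json.loads(ua.split(" ", 1)[1])  # the remainder parses as JSON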
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urllib_parse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.rsplit("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
        # We only care about 401 responses; anything else we just pass
        # through as the actual response
if resp.status_code != 401:
return resp
        # We are not able to prompt the user, so simply return the response
if not self.prompting:
return resp
parsed = urllib_parse.urlparse(resp.url)
# Prompt the user for a new username and password
username = six.moves.input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
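# --- Hedged sketch (added; the netloc and credentials are hypothetical) ---
# Credentials embedded in a URL's netloc are parsed out so they can be
# stripped from the request and cached per host for later 401 handling.
def _demo_parse_credentials():
    auth = MultiDomainBasicAuth(prompting=False)
    assert auth.parse_credentials("user:pass@pypi.example.org") == \
        ["user", "pass"]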
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = os.stat(pathname)
except OSError as exc:
resp.status_code = 404
resp.raw = exc
else:
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict({
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self):
pass
class SafeFileCache(FileCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
"""
def __init__(self, *args, **kwargs):
super(SafeFileCache, self).__init__(*args, **kwargs)
# Check to ensure that the directory containing our cache directory
        # is owned by the user currently executing pip. If it does not exist
# we will check the parent directory until we find one that does exist.
# If it is not owned by the user executing pip then we will disable
# the cache and log a warning.
if not check_path_owner(self.directory):
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the cache has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want sudo's -H flag.",
self.directory,
)
# Set our directory to None to disable the Cache
self.directory = None
def get(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).get(*args, **kwargs)
except (LockError, OSError, IOError):
            # We intentionally silence this error; if we can't access the
            # cache then we can just skip caching and process the request
            # as if caching wasn't enabled.
pass
def set(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).set(*args, **kwargs)
except (LockError, OSError, IOError):
            # We intentionally silence this error; if we can't access the
            # cache then we can just skip caching and process the request
            # as if caching wasn't enabled.
pass
def delete(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).delete(*args, **kwargs)
except (LockError, OSError, IOError):
            # We intentionally silence this error; if we can't access the
            # cache then we can just skip caching and process the request
            # as if caching wasn't enabled.
pass
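# --- Hedged usage sketch (added; the cache directory is hypothetical) ---
# SafeFileCache degrades to a no-op instead of raising: if the directory's
# closest existing ancestor is not owned by the current user, or a lock/IO
# error occurs, the operation is silently skipped.
def _demo_safe_cache():
    cache = SafeFileCache("/tmp/pip-demo-cache")
    cache.set("key", b"value")   # may be a no-op if the cache is disabled
    cache.get("key")             # returns the stored bytes or None
    cache.delete("key")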
class InsecureHTTPAdapter(HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
retries = kwargs.pop("retries", 0)
cache = kwargs.pop("cache", None)
insecure_hosts = kwargs.pop("insecure_hosts", [])
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
            # connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
status_forcelist=[503],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
)
# We want to _only_ cache responses on securely fetched origins. We do
# this because we can't validate the response of an insecurely fetched
# origin, and we don't want someone to be able to poison the cache and
        # require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache, use_dir_lock=True),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries)
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching (see above) so we'll use it for all http:// URLs as
# well as any https:// host that we've marked as ignoring TLS errors
# for.
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
# We want to use a non-validating adapter for any requests which are
# deemed insecure.
for host in insecure_hosts:
self.mount("https://{0}/".format(host), insecure_adapter)
def request(self, method, url, *args, **kwargs):
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
raise TypeError(
"get_file_content() missing 1 required keyword argument: 'session'"
)
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from and
comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
if six.PY3:
return resp.url, resp.text
else:
return resp.url, resp.content
try:
with open(url) as f:
content = f.read()
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
_, netloc, path, _, _ = urllib_parse.urlsplit(url)
# if we have a UNC path, prepend UNC share notation
if netloc:
netloc = '\\\\' + netloc
path = urllib_request.url2pathname(netloc + path)
return path
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
return url
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
ext = splitext(name)[1].lower()
if ext in ARCHIVE_EXTENSIONS:
return True
return False
def unpack_vcs_link(link, location):
vcs_backend = _get_used_vcs_backend(link)
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.critical(
"Hash digest size of the package %d (%s) doesn't match the "
"expected hash name %s!",
download_hash.digest_size, link, link.hash_name,
)
raise HashMismatch('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.critical(
"Hash of the package %s (%s) doesn't match the expected hash %s!",
link, download_hash.hexdigest(), link.hash,
)
raise HashMismatch(
'Bad %s hash for package %s' % (link.hash_name, link)
)
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warning(
"Unsupported hash name %s for package %s", link.hash_name, link,
)
return None
with open(target_file, 'rb') as fp:
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
return download_hash
def _progress_indicator(iterable, *args, **kwargs):
return iterable
def _download_url(resp, link, content_file):
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warning(
"Unsupported hash name %s for package %s",
link.hash_name, link,
)
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
cached_resp = getattr(resp, "from_cache", False)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif cached_resp:
show_progress = False
elif total_length > (40 * 1000):
show_progress = True
elif not total_length:
show_progress = True
else:
show_progress = False
show_url = link.show_url
def resp_read(chunk_size):
try:
# Special case for urllib3.
for chunk in resp.raw.stream(
chunk_size,
# We use decode_content=False here because we do
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
progress_indicator = _progress_indicator
if link.netloc == PyPI.netloc:
url = show_url
else:
url = link.url_without_fragment
if show_progress: # We don't show progress on cached responses
if total_length:
logger.info(
"Downloading %s (%s)", url, format_size(total_length),
)
progress_indicator = DownloadProgressBar(
max=total_length,
).iter
else:
logger.info("Downloading %s", url)
progress_indicator = DownloadProgressSpinner().iter
elif cached_resp:
logger.info("Using cached %s", url)
else:
logger.info("Downloading %s", url)
logger.debug('Downloading from URL %s', link)
for chunk in progress_indicator(resp_read(4096), 4096):
if download_hash is not None:
download_hash.update(chunk)
content_file.write(chunk)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warning('Deleting %s', display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warning(
'Backing up %s to %s',
display_path(download_location),
display_path(dest_file),
)
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.info('Saved %s', display_path(download_location))
def unpack_http_url(link, location, download_dir=None, session=None):
if session is None:
raise TypeError(
"unpack_http_url() missing 1 required keyword argument: 'session'"
)
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
# If a download dir is specified, is the file already downloaded there?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link, download_dir)
if already_downloaded_path:
from_path = already_downloaded_path
content_type = mimetypes.guess_type(from_path)[0]
else:
# let's download to a tmp dir
from_path, content_type = _download_http_url(link, session, temp_dir)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, content_type, link)
if not already_downloaded_path:
os.unlink(from_path)
rmtree(temp_dir)
def unpack_file_url(link, location, download_dir=None):
"""Unpack link into location.
If download_dir is provided and link points to a file, make a copy
of the link file inside download_dir."""
link_path = url_to_path(link.url_without_fragment)
# If it's a url to a local directory
if os.path.isdir(link_path):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
if download_dir:
logger.info('Link is a directory, ignoring download_dir')
return
# if link has a hash, let's confirm it matches
if link.hash:
link_path_hash = _get_hash_from_file(link_path, link)
_check_hash(link_path_hash, link)
# If a download dir is specified, is the file already there and valid?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link, download_dir)
if already_downloaded_path:
from_path = already_downloaded_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, content_type, link)
def _copy_dist_from_dir(link_path, location):
"""Copy distribution files in `link_path` to `location`.
Invoked when user requests to install a local directory. E.g.:
pip install .
pip install ~/dev/git-repos/python-prompt-toolkit
"""
# Note: This is currently VERY SLOW if you have a lot of data in the
# directory, because it copies everything with `shutil.copytree`.
# What it should really do is build an sdist and install that.
# See https://github.com/pypa/pip/issues/2195
if os.path.isdir(location):
rmtree(location)
# build an sdist
setup_py = 'setup.py'
sdist_args = [sys.executable]
sdist_args.append('-c')
sdist_args.append(
"import setuptools, tokenize;__file__=%r;"
"exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
".replace('\\r\\n', '\\n'), __file__, 'exec'))" % setup_py)
sdist_args.append('sdist')
sdist_args += ['--dist-dir', location]
logger.info('Running setup.py sdist for %s', link_path)
with indent_log():
call_subprocess(sdist_args, cwd=link_path, show_stdout=False)
# unpack sdist into `location`
sdist = os.path.join(location, os.listdir(location)[0])
logger.info('Unpacking sdist %s into %s', sdist, location)
unpack_file(sdist, location, content_type=None, link=None)
class PipXmlrpcTransport(xmlrpc_client.Transport):
"""Provide a `xmlrpclib.Transport` implementation via a `PipSession`
object.
"""
def __init__(self, index_url, session, use_datetime=False):
xmlrpc_client.Transport.__init__(self, use_datetime)
index_parts = urllib_parse.urlparse(index_url)
self._scheme = index_parts.scheme
self._session = session
def request(self, host, handler, request_body, verbose=False):
parts = (self._scheme, host, handler, None, None, None)
url = urllib_parse.urlunparse(parts)
try:
headers = {'Content-Type': 'text/xml'}
response = self._session.post(url, data=request_body,
headers=headers, stream=True)
response.raise_for_status()
self.verbose = verbose
return self.parse_response(response.raw)
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s",
exc.response.status_code, url,
)
raise
def unpack_url(link, location, download_dir=None,
only_download=False, session=None):
"""Unpack link.
If link is a VCS link:
if only_download, export into download_dir and ignore location
else unpack into location
for other types of link:
- unpack into location
- if download_dir, copy the file into download_dir
- if only_download, mark location for deletion
"""
# non-editable vcs urls
if is_vcs_url(link):
unpack_vcs_link(link, location)
# file urls
elif is_file_url(link):
unpack_file_url(link, location, download_dir)
# http urls
else:
if session is None:
session = PipSession()
unpack_http_url(
link,
location,
download_dir,
session,
)
if only_download:
write_delete_marker_file(location)
def _download_http_url(link, session, temp_dir):
"""Download link url into temp_dir using provided session"""
target_url = link.url.split('#', 1)[0]
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
# By setting this to request only the identity encoding We're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s", exc.response.status_code, link,
)
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
file_path = os.path.join(temp_dir, filename)
with open(file_path, 'wb') as content_file:
_download_url(resp, link, content_file)
return file_path, content_type
def _check_download_dir(link, download_dir):
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if link.hash:
download_hash = _get_hash_from_file(download_path, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash, '
're-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
return None
| mit |
deepakkv07/Implementation-of-UDP-Lite-in-ns-3 | src/core/bindings/modulegen__gcc_ILP32.py | 4 | 321492 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
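# Note (added for clarity): returning True from handle_error tells
# pybindgen the error was handled, so the offending wrapper is skipped
# with the warning above instead of aborting module generation.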
import sys
def module_init():
root_module = Module('ns.core', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## log.h (module 'core'): ns3::LogLevel [enumeration]
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', outer_class=root_module['ns3::AttributeConstructionList'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase')
## command-line.h (module 'core'): ns3::CommandLine [class]
module.add_class('CommandLine', allow_subclassing=True)
## system-mutex.h (module 'core'): ns3::CriticalSection [class]
module.add_class('CriticalSection')
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector [class]
module.add_class('EventGarbageCollector')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId')
## global-value.h (module 'core'): ns3::GlobalValue [class]
module.add_class('GlobalValue')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher')
## int-to-type.h (module 'core'): ns3::IntToType<0> [struct]
module.add_class('IntToType', template_parameters=['0'])
## int-to-type.h (module 'core'): ns3::IntToType<0>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'])
## int-to-type.h (module 'core'): ns3::IntToType<1> [struct]
module.add_class('IntToType', template_parameters=['1'])
## int-to-type.h (module 'core'): ns3::IntToType<1>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'])
## int-to-type.h (module 'core'): ns3::IntToType<2> [struct]
module.add_class('IntToType', template_parameters=['2'])
## int-to-type.h (module 'core'): ns3::IntToType<2>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'])
## int-to-type.h (module 'core'): ns3::IntToType<3> [struct]
module.add_class('IntToType', template_parameters=['3'])
## int-to-type.h (module 'core'): ns3::IntToType<3>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'])
## int-to-type.h (module 'core'): ns3::IntToType<4> [struct]
module.add_class('IntToType', template_parameters=['4'])
## int-to-type.h (module 'core'): ns3::IntToType<4>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'])
## int-to-type.h (module 'core'): ns3::IntToType<5> [struct]
module.add_class('IntToType', template_parameters=['5'])
## int-to-type.h (module 'core'): ns3::IntToType<5>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'])
## int-to-type.h (module 'core'): ns3::IntToType<6> [struct]
module.add_class('IntToType', template_parameters=['6'])
## int-to-type.h (module 'core'): ns3::IntToType<6>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'])
## log.h (module 'core'): ns3::LogComponent [class]
module.add_class('LogComponent')
## names.h (module 'core'): ns3::Names [class]
module.add_class('Names')
## non-copyable.h (module 'core'): ns3::NonCopyable [class]
module.add_class('NonCopyable', destructor_visibility='protected')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True)
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory')
## log.h (module 'core'): ns3::ParameterLogger [class]
module.add_class('ParameterLogger')
## random-variable-stream-helper.h (module 'core'): ns3::RandomVariableStreamHelper [class]
module.add_class('RandomVariableStreamHelper')
## rng-seed-manager.h (module 'core'): ns3::RngSeedManager [class]
module.add_class('RngSeedManager')
## rng-stream.h (module 'core'): ns3::RngStream [class]
module.add_class('RngStream')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private')
## simulator.h (module 'core'): ns3::Simulator [enumeration]
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'])
## singleton.h (module 'core'): ns3::Singleton<ns3::DesMetrics> [class]
module.add_class('Singleton', template_parameters=['ns3::DesMetrics'], parent=root_module['ns3::NonCopyable'])
## system-condition.h (module 'core'): ns3::SystemCondition [class]
module.add_class('SystemCondition')
## system-mutex.h (module 'core'): ns3::SystemMutex [class]
module.add_class('SystemMutex')
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs [class]
module.add_class('SystemWallClockMs')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit')
## timer.h (module 'core'): ns3::Timer [class]
module.add_class('Timer')
## timer.h (module 'core'): ns3::Timer::DestroyPolicy [enumeration]
module.add_enum('DestroyPolicy', ['CANCEL_ON_DESTROY', 'REMOVE_ON_DESTROY', 'CHECK_ON_DESTROY'], outer_class=root_module['ns3::Timer'])
## timer.h (module 'core'): ns3::Timer::State [enumeration]
module.add_enum('State', ['RUNNING', 'EXPIRED', 'SUSPENDED'], outer_class=root_module['ns3::Timer'])
## timer-impl.h (module 'core'): ns3::TimerImpl [class]
module.add_class('TimerImpl', allow_subclassing=True)
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', outer_class=root_module['ns3::TypeId'])
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D')
## watchdog.h (module 'core'): ns3::Watchdog [class]
module.add_class('Watchdog')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'])
## des-metrics.h (module 'core'): ns3::DesMetrics [class]
module.add_class('DesMetrics', parent=root_module['ns3::Singleton< ns3::DesMetrics >'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', outer_class=root_module['ns3::Object'])
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
module.add_class('RandomVariableStream', parent=root_module['ns3::Object'])
## scheduler.h (module 'core'): ns3::Scheduler [class]
module.add_class('Scheduler', parent=root_module['ns3::Object'])
## scheduler.h (module 'core'): ns3::Scheduler::Event [struct]
module.add_class('Event', outer_class=root_module['ns3::Scheduler'])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey [struct]
module.add_class('EventKey', outer_class=root_module['ns3::Scheduler'])
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
module.add_class('SequentialRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::FdReader', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FdReader>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::RefCountBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::RefCountBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator-impl.h (module 'core'): ns3::SimulatorImpl [class]
module.add_class('SimulatorImpl', parent=root_module['ns3::Object'])
## synchronizer.h (module 'core'): ns3::Synchronizer [class]
module.add_class('Synchronizer', parent=root_module['ns3::Object'])
## system-thread.h (module 'core'): ns3::SystemThread [class]
module.add_class('SystemThread', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'])
    ## nstime.h (module 'core'): ns3::Time [class] (implicit conversion)
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
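    ## The conversion registered above mirrors the implicit C++ conversion
    ## operator in nstime.h, so a ns3::Time argument is accepted wherever a
    ## ns3::int64x64_t parameter is expected.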
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
module.add_class('TriangularRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
module.add_class('UniformRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer [class]
module.add_class('WallClockSynchronizer', parent=root_module['ns3::Synchronizer'])
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
module.add_class('WeibullRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
module.add_class('ZetaRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
module.add_class('ZipfRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## boolean.h (module 'core'): ns3::BooleanChecker [class]
module.add_class('BooleanChecker', parent=root_module['ns3::AttributeChecker'])
## boolean.h (module 'core'): ns3::BooleanValue [class]
module.add_class('BooleanValue', parent=root_module['ns3::AttributeValue'])
## calendar-scheduler.h (module 'core'): ns3::CalendarScheduler [class]
module.add_class('CalendarScheduler', parent=root_module['ns3::Scheduler'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
module.add_class('ConstantRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## default-simulator-impl.h (module 'core'): ns3::DefaultSimulatorImpl [class]
module.add_class('DefaultSimulatorImpl', parent=root_module['ns3::SimulatorImpl'])
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
module.add_class('DeterministicRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## double.h (module 'core'): ns3::DoubleValue [class]
module.add_class('DoubleValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
module.add_class('EmpiricalRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', parent=root_module['ns3::AttributeValue'])
## enum.h (module 'core'): ns3::EnumChecker [class]
module.add_class('EnumChecker', parent=root_module['ns3::AttributeChecker'])
## enum.h (module 'core'): ns3::EnumValue [class]
module.add_class('EnumValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
module.add_class('ErlangRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
module.add_class('ExponentialRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## unix-fd-reader.h (module 'core'): ns3::FdReader [class]
module.add_class('FdReader', parent=root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
module.add_class('GammaRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## heap-scheduler.h (module 'core'): ns3::HeapScheduler [class]
module.add_class('HeapScheduler', parent=root_module['ns3::Scheduler'])
## integer.h (module 'core'): ns3::IntegerValue [class]
module.add_class('IntegerValue', parent=root_module['ns3::AttributeValue'])
## list-scheduler.h (module 'core'): ns3::ListScheduler [class]
module.add_class('ListScheduler', parent=root_module['ns3::Scheduler'])
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
module.add_class('LogNormalRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## map-scheduler.h (module 'core'): ns3::MapScheduler [class]
module.add_class('MapScheduler', parent=root_module['ns3::Scheduler'])
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
module.add_class('NormalRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', parent=root_module['ns3::AttributeValue'])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerAccessor [class]
module.add_class('ObjectPtrContainerAccessor', parent=root_module['ns3::AttributeAccessor'])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerChecker [class]
module.add_class('ObjectPtrContainerChecker', parent=root_module['ns3::AttributeChecker'])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue [class]
module.add_class('ObjectPtrContainerValue', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
module.add_class('ParetoRandomVariable', parent=root_module['ns3::RandomVariableStream'])
## pointer.h (module 'core'): ns3::PointerChecker [class]
module.add_class('PointerChecker', parent=root_module['ns3::AttributeChecker'])
## pointer.h (module 'core'): ns3::PointerValue [class]
module.add_class('PointerValue', parent=root_module['ns3::AttributeValue'])
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl [class]
module.add_class('RealtimeSimulatorImpl', parent=root_module['ns3::SimulatorImpl'])
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::SynchronizationMode [enumeration]
module.add_enum('SynchronizationMode', ['SYNC_BEST_EFFORT', 'SYNC_HARD_LIMIT'], outer_class=root_module['ns3::RealtimeSimulatorImpl'])
## ref-count-base.h (module 'core'): ns3::RefCountBase [class]
module.add_class('RefCountBase', parent=root_module['ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >'])
## string.h (module 'core'): ns3::StringChecker [class]
module.add_class('StringChecker', parent=root_module['ns3::AttributeChecker'])
## string.h (module 'core'): ns3::StringValue [class]
module.add_class('StringValue', parent=root_module['ns3::AttributeValue'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', parent=root_module['ns3::AttributeValue'])
## uinteger.h (module 'core'): ns3::UintegerValue [class]
module.add_class('UintegerValue', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', parent=root_module['ns3::AttributeValue'])
module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type=u'map')
typehandlers.add_type_alias(u'ns3::RngSeedManager', u'ns3::SeedManager')
typehandlers.add_type_alias(u'ns3::RngSeedManager*', u'ns3::SeedManager*')
typehandlers.add_type_alias(u'ns3::RngSeedManager&', u'ns3::SeedManager&')
module.add_typedef(root_module['ns3::RngSeedManager'], 'SeedManager')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue', u'ns3::ObjectVectorValue')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue*', u'ns3::ObjectVectorValue*')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue&', u'ns3::ObjectVectorValue&')
module.add_typedef(root_module['ns3::ObjectPtrContainerValue'], 'ObjectVectorValue')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) *', u'ns3::LogTimePrinter')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) **', u'ns3::LogTimePrinter*')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) *&', u'ns3::LogTimePrinter&')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) *', u'ns3::LogNodePrinter')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) **', u'ns3::LogNodePrinter*')
typehandlers.add_type_alias(u'void ( * ) ( std::ostream & ) *&', u'ns3::LogNodePrinter&')
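    ## 'void ( * ) ( std::ostream & ) *' is pybindgen's rendering of the
    ## function-pointer typedefs behind ns3::LogTimePrinter and
    ## ns3::LogNodePrinter, i.e. typedef void (*)(std::ostream &); the same
    ## spelling recurs for the hash and traced-value callback aliases below.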
typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue', u'ns3::ObjectMapValue')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue*', u'ns3::ObjectMapValue*')
typehandlers.add_type_alias(u'ns3::ObjectPtrContainerValue&', u'ns3::ObjectMapValue&')
module.add_typedef(root_module['ns3::ObjectPtrContainerValue'], 'ObjectMapValue')
typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
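    ## Each typedef is aliased in three forms (value, pointer, reference)
    ## via typehandlers.add_type_alias; class typedefs additionally get a
    ## module.add_typedef so the alias is importable as a Python name, while
    ## function-pointer typedefs such as LogTimePrinter stay alias-only.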
## Register a nested module for the namespace CommandLineHelper
nested_module = module.add_cpp_namespace('CommandLineHelper')
register_types_ns3_CommandLineHelper(nested_module)
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace SystemPath
nested_module = module.add_cpp_namespace('SystemPath')
register_types_ns3_SystemPath(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
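## Namespaces that contribute no types of their own (CommandLineHelper,
## FatalImpl, internal) still receive a register_types_ns3_* function below,
## so every nested module can be resolved uniformly via module.get_root().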
def register_types_ns3_CommandLineHelper(module):
root_module = module.get_root()
def register_types_ns3_Config(module):
root_module = module.get_root()
## config.h (module 'core'): ns3::Config::MatchContainer [class]
module.add_class('MatchContainer')
module.add_container('std::vector< ns3::Ptr< ns3::Object > >', 'ns3::Ptr< ns3::Object >', container_type=u'vector')
module.add_container('std::vector< std::string >', 'std::string', container_type=u'vector')
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_SystemPath(module):
root_module = module.get_root()
module.add_container('std::list< std::string >', 'std::string', container_type=u'list')
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t ) *', u'ns3::TracedValueCallback::Uint8')
typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t ) **', u'ns3::TracedValueCallback::Uint8*')
typehandlers.add_type_alias(u'void ( * ) ( uint8_t, uint8_t ) *&', u'ns3::TracedValueCallback::Uint8&')
typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t ) *', u'ns3::TracedValueCallback::Int8')
typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t ) **', u'ns3::TracedValueCallback::Int8*')
typehandlers.add_type_alias(u'void ( * ) ( int8_t, int8_t ) *&', u'ns3::TracedValueCallback::Int8&')
typehandlers.add_type_alias(u'void ( * ) ( double, double ) *', u'ns3::TracedValueCallback::Double')
typehandlers.add_type_alias(u'void ( * ) ( double, double ) **', u'ns3::TracedValueCallback::Double*')
typehandlers.add_type_alias(u'void ( * ) ( double, double ) *&', u'ns3::TracedValueCallback::Double&')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t ) *', u'ns3::TracedValueCallback::Uint32')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t ) **', u'ns3::TracedValueCallback::Uint32*')
typehandlers.add_type_alias(u'void ( * ) ( uint32_t, uint32_t ) *&', u'ns3::TracedValueCallback::Uint32&')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')
typehandlers.add_type_alias(u'void ( * ) ( bool, bool ) *', u'ns3::TracedValueCallback::Bool')
typehandlers.add_type_alias(u'void ( * ) ( bool, bool ) **', u'ns3::TracedValueCallback::Bool*')
typehandlers.add_type_alias(u'void ( * ) ( bool, bool ) *&', u'ns3::TracedValueCallback::Bool&')
typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t ) *', u'ns3::TracedValueCallback::Int16')
typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t ) **', u'ns3::TracedValueCallback::Int16*')
typehandlers.add_type_alias(u'void ( * ) ( int16_t, int16_t ) *&', u'ns3::TracedValueCallback::Int16&')
typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t ) *', u'ns3::TracedValueCallback::Int32')
typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t ) **', u'ns3::TracedValueCallback::Int32*')
typehandlers.add_type_alias(u'void ( * ) ( int32_t, int32_t ) *&', u'ns3::TracedValueCallback::Int32&')
typehandlers.add_type_alias(u'void ( * ) ( ) *', u'ns3::TracedValueCallback::Void')
typehandlers.add_type_alias(u'void ( * ) ( ) **', u'ns3::TracedValueCallback::Void*')
typehandlers.add_type_alias(u'void ( * ) ( ) *&', u'ns3::TracedValueCallback::Void&')
typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t ) *', u'ns3::TracedValueCallback::Uint16')
typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t ) **', u'ns3::TracedValueCallback::Uint16*')
typehandlers.add_type_alias(u'void ( * ) ( uint16_t, uint16_t ) *&', u'ns3::TracedValueCallback::Uint16&')
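## Each TracedValueCallback alias above names a plain function signature of
## the form void(oldValue, newValue). A hedged sketch of a matching trace
## sink on the Python side (names illustrative only):
##   def trace_sink(old_value, new_value):   # e.g. TracedValueCallback::Uint32
##       print(old_value, "->", new_value)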
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3CommandLine_methods(root_module, root_module['ns3::CommandLine'])
register_Ns3CriticalSection_methods(root_module, root_module['ns3::CriticalSection'])
register_Ns3EventGarbageCollector_methods(root_module, root_module['ns3::EventGarbageCollector'])
register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
register_Ns3GlobalValue_methods(root_module, root_module['ns3::GlobalValue'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
register_Ns3Names_methods(root_module, root_module['ns3::Names'])
register_Ns3NonCopyable_methods(root_module, root_module['ns3::NonCopyable'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger'])
register_Ns3RandomVariableStreamHelper_methods(root_module, root_module['ns3::RandomVariableStreamHelper'])
register_Ns3RngSeedManager_methods(root_module, root_module['ns3::RngSeedManager'])
register_Ns3RngStream_methods(root_module, root_module['ns3::RngStream'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
register_Ns3Singleton__Ns3DesMetrics_methods(root_module, root_module['ns3::Singleton< ns3::DesMetrics >'])
register_Ns3SystemCondition_methods(root_module, root_module['ns3::SystemCondition'])
register_Ns3SystemMutex_methods(root_module, root_module['ns3::SystemMutex'])
register_Ns3SystemWallClockMs_methods(root_module, root_module['ns3::SystemWallClockMs'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3Timer_methods(root_module, root_module['ns3::Timer'])
register_Ns3TimerImpl_methods(root_module, root_module['ns3::TimerImpl'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3Watchdog_methods(root_module, root_module['ns3::Watchdog'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3DesMetrics_methods(root_module, root_module['ns3::DesMetrics'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
register_Ns3Scheduler_methods(root_module, root_module['ns3::Scheduler'])
register_Ns3SchedulerEvent_methods(root_module, root_module['ns3::Scheduler::Event'])
register_Ns3SchedulerEventKey_methods(root_module, root_module['ns3::Scheduler::EventKey'])
register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3RefCountBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3RefCountBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >'])
register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SimulatorImpl_methods(root_module, root_module['ns3::SimulatorImpl'])
register_Ns3Synchronizer_methods(root_module, root_module['ns3::Synchronizer'])
register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
register_Ns3WallClockSynchronizer_methods(root_module, root_module['ns3::WallClockSynchronizer'])
register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
register_Ns3CalendarScheduler_methods(root_module, root_module['ns3::CalendarScheduler'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
register_Ns3DefaultSimulatorImpl_methods(root_module, root_module['ns3::DefaultSimulatorImpl'])
register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
register_Ns3FdReader_methods(root_module, root_module['ns3::FdReader'])
register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
register_Ns3HeapScheduler_methods(root_module, root_module['ns3::HeapScheduler'])
register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
register_Ns3ListScheduler_methods(root_module, root_module['ns3::ListScheduler'])
register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
register_Ns3MapScheduler_methods(root_module, root_module['ns3::MapScheduler'])
register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3ObjectPtrContainerAccessor_methods(root_module, root_module['ns3::ObjectPtrContainerAccessor'])
register_Ns3ObjectPtrContainerChecker_methods(root_module, root_module['ns3::ObjectPtrContainerChecker'])
register_Ns3ObjectPtrContainerValue_methods(root_module, root_module['ns3::ObjectPtrContainerValue'])
register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
register_Ns3PointerChecker_methods(root_module, root_module['ns3::PointerChecker'])
register_Ns3PointerValue_methods(root_module, root_module['ns3::PointerValue'])
register_Ns3RealtimeSimulatorImpl_methods(root_module, root_module['ns3::RealtimeSimulatorImpl'])
register_Ns3RefCountBase_methods(root_module, root_module['ns3::RefCountBase'])
register_Ns3StringChecker_methods(root_module, root_module['ns3::StringChecker'])
register_Ns3StringValue_methods(root_module, root_module['ns3::StringValue'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3ConfigMatchContainer_methods(root_module, root_module['ns3::Config::MatchContainer'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
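## Each register_Ns3*_methods() function below mirrors exactly one C++
## class: constructors become cls.add_constructor(...), member functions
## become cls.add_method(...), and public data members become
## cls.add_instance_attribute(...). The per-line '##' annotations quote the
## C++ declaration each registration was generated from.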
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3CommandLine_methods(root_module, cls):
cls.add_output_stream_operator()
## command-line.h (module 'core'): ns3::CommandLine::CommandLine() [constructor]
cls.add_constructor([])
## command-line.h (module 'core'): ns3::CommandLine::CommandLine(ns3::CommandLine const & cmd) [copy constructor]
cls.add_constructor([param('ns3::CommandLine const &', 'cmd')])
## command-line.h (module 'core'): void ns3::CommandLine::AddValue(std::string const & name, std::string const & help, ns3::Callback<bool, std::string, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddValue',
'void',
[param('std::string const &', 'name'), param('std::string const &', 'help'), param('ns3::Callback< bool, std::string, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## command-line.h (module 'core'): void ns3::CommandLine::AddValue(std::string const & name, std::string const & attributePath) [member function]
cls.add_method('AddValue',
'void',
[param('std::string const &', 'name'), param('std::string const &', 'attributePath')])
## command-line.h (module 'core'): std::string ns3::CommandLine::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## command-line.h (module 'core'): void ns3::CommandLine::Parse(int argc, char * * argv) [member function]
cls.add_method('Parse',
'void',
[param('int', 'argc'), param('char * *', 'argv')])
## command-line.h (module 'core'): void ns3::CommandLine::PrintHelp(std::ostream & os) const [member function]
cls.add_method('PrintHelp',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## command-line.h (module 'core'): void ns3::CommandLine::Usage(std::string const usage) [member function]
cls.add_method('Usage',
'void',
[param('std::string const', 'usage')])
return
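## A minimal usage sketch of the CommandLine binding registered above,
## assuming the compiled module is imported as ns.core (illustration only,
## not part of the generated registrations):
##   import sys
##   import ns.core
##   cmd = ns.core.CommandLine()
##   cmd.Parse(sys.argv)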
def register_Ns3CriticalSection_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::CriticalSection const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CriticalSection const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::CriticalSection::CriticalSection(ns3::SystemMutex & mutex) [constructor]
cls.add_constructor([param('ns3::SystemMutex &', 'mutex')])
return
def register_Ns3EventGarbageCollector_methods(root_module, cls):
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector::EventGarbageCollector(ns3::EventGarbageCollector const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventGarbageCollector const &', 'arg0')])
## event-garbage-collector.h (module 'core'): ns3::EventGarbageCollector::EventGarbageCollector() [constructor]
cls.add_constructor([])
## event-garbage-collector.h (module 'core'): void ns3::EventGarbageCollector::Track(ns3::EventId event) [member function]
cls.add_method('Track',
'void',
[param('ns3::EventId', 'event')])
return
def register_Ns3EventId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_binary_comparison_operator('==')
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventId const &', 'arg0')])
## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
cls.add_constructor([])
## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
cls.add_method('GetTs',
'uint64_t',
[],
is_const=True)
## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
cls.add_method('GetUid',
'uint32_t',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
cls.add_method('PeekEventImpl',
'ns3::EventImpl *',
[],
is_const=True)
return
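## Hedged sketch of EventId in use (ns.core import assumed; my_callback is
## a hypothetical Python callable):
##   evt = ns.core.Simulator.Schedule(ns.core.Seconds(1.0), my_callback)
##   evt.Cancel()                  # void Cancel()
##   assert evt.IsExpired()        # cancelled events report expired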
def register_Ns3GlobalValue_methods(root_module, cls):
## global-value.h (module 'core'): ns3::GlobalValue::GlobalValue(ns3::GlobalValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GlobalValue const &', 'arg0')])
## global-value.h (module 'core'): ns3::GlobalValue::GlobalValue(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeChecker const> checker) [constructor]
cls.add_constructor([param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## global-value.h (module 'core'): static __gnu_cxx::__normal_iterator<ns3::GlobalValue* const*,std::vector<ns3::GlobalValue*, std::allocator<ns3::GlobalValue*> > > ns3::GlobalValue::Begin() [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::GlobalValue * const *, std::vector< ns3::GlobalValue * > >',
[],
is_static=True)
## global-value.h (module 'core'): static void ns3::GlobalValue::Bind(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Bind',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
is_static=True)
## global-value.h (module 'core'): static bool ns3::GlobalValue::BindFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('BindFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
is_static=True)
## global-value.h (module 'core'): static __gnu_cxx::__normal_iterator<ns3::GlobalValue* const*,std::vector<ns3::GlobalValue*, std::allocator<ns3::GlobalValue*> > > ns3::GlobalValue::End() [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::GlobalValue * const *, std::vector< ns3::GlobalValue * > >',
[],
is_static=True)
## global-value.h (module 'core'): ns3::Ptr<ns3::AttributeChecker const> ns3::GlobalValue::GetChecker() const [member function]
cls.add_method('GetChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[],
is_const=True)
## global-value.h (module 'core'): std::string ns3::GlobalValue::GetHelp() const [member function]
cls.add_method('GetHelp',
'std::string',
[],
is_const=True)
## global-value.h (module 'core'): std::string ns3::GlobalValue::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## global-value.h (module 'core'): void ns3::GlobalValue::GetValue(ns3::AttributeValue & value) const [member function]
cls.add_method('GetValue',
'void',
[param('ns3::AttributeValue &', 'value')],
is_const=True)
## global-value.h (module 'core'): static void ns3::GlobalValue::GetValueByName(std::string name, ns3::AttributeValue & value) [member function]
cls.add_method('GetValueByName',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_static=True)
## global-value.h (module 'core'): static bool ns3::GlobalValue::GetValueByNameFailSafe(std::string name, ns3::AttributeValue & value) [member function]
cls.add_method('GetValueByNameFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_static=True)
## global-value.h (module 'core'): void ns3::GlobalValue::ResetInitialValue() [member function]
cls.add_method('ResetInitialValue',
'void',
[])
## global-value.h (module 'core'): bool ns3::GlobalValue::SetValue(ns3::AttributeValue const & value) [member function]
cls.add_method('SetValue',
'bool',
[param('ns3::AttributeValue const &', 'value')])
return
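## Hedged sketch of the static GlobalValue interface registered above
## (ns.core import assumed; SimulatorImplementationType is a real ns-3
## global value, shown here purely as an illustration):
##   ns.core.GlobalValue.Bind("SimulatorImplementationType",
##                            ns.core.StringValue("ns3::RealtimeSimulatorImpl"))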
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
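## Hedged sketch of the Hasher chain exposed above (ns.core import assumed):
##   hasher = ns.core.Hasher()
##   h32 = hasher.clear().GetHash32("hello")   # std::string overload
##   h64 = hasher.clear().GetHash64("hello")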
def register_Ns3IntToType__0_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<0>::IntToType(ns3::IntToType<0> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')])
return
def register_Ns3IntToType__1_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<1>::IntToType(ns3::IntToType<1> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')])
return
def register_Ns3IntToType__2_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<2>::IntToType(ns3::IntToType<2> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')])
return
def register_Ns3IntToType__3_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<3>::IntToType(ns3::IntToType<3> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')])
return
def register_Ns3IntToType__4_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<4>::IntToType(ns3::IntToType<4> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')])
return
def register_Ns3IntToType__5_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<5>::IntToType(ns3::IntToType<5> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')])
return
def register_Ns3IntToType__6_methods(root_module, cls):
## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h (module 'core'): ns3::IntToType<6>::IntToType(ns3::IntToType<6> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')])
return
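## IntToType<0..6> are empty compile-time tag types used by the callback
## machinery; only default and copy constructors exist, so the seven
## registrations above are intentionally identical boilerplate.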
def register_Ns3LogComponent_methods(root_module, cls):
## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [copy constructor]
cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
## log.h (module 'core'): ns3::LogComponent::LogComponent(std::string const & name, std::string const & file, ns3::LogLevel const mask=::ns3::LOG_NONE) [constructor]
cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'file'), param('ns3::LogLevel const', 'mask', default_value='::ns3::LOG_NONE')])
## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel const level) [member function]
cls.add_method('Disable',
'void',
[param('ns3::LogLevel const', 'level')])
## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel const level) [member function]
cls.add_method('Enable',
'void',
[param('ns3::LogLevel const', 'level')])
## log.h (module 'core'): std::string ns3::LogComponent::File() const [member function]
cls.add_method('File',
'std::string',
[],
is_const=True)
## log.h (module 'core'): static std::map<std::basic_string<char, std::char_traits<char>, std::allocator<char> >,ns3::LogComponent*,std::less<std::basic_string<char, std::char_traits<char>, std::allocator<char> > >,std::allocator<std::pair<const std::basic_string<char, std::char_traits<char>, std::allocator<char> >, ns3::LogComponent*> > > * ns3::LogComponent::GetComponentList() [member function]
cls.add_method('GetComponentList',
'std::map< std::string, ns3::LogComponent * > *',
[],
is_static=True)
## log.h (module 'core'): static std::string ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) [member function]
cls.add_method('GetLevelLabel',
'std::string',
[param('ns3::LogLevel const', 'level')],
is_static=True)
## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel const level) const [member function]
cls.add_method('IsEnabled',
'bool',
[param('ns3::LogLevel const', 'level')],
is_const=True)
## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function]
cls.add_method('IsNoneEnabled',
'bool',
[],
is_const=True)
## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function]
cls.add_method('Name',
'char const *',
[],
is_const=True)
## log.h (module 'core'): void ns3::LogComponent::SetMask(ns3::LogLevel const level) [member function]
cls.add_method('SetMask',
'void',
[param('ns3::LogLevel const', 'level')])
return
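## Hedged sketch: the per-component controls above pair with the free
## function ns3::LogComponentEnable, bound elsewhere in this file
## (ns.core import assumed; the component name is illustrative):
##   ns.core.LogComponentEnable("UdpEchoClientApplication", ns.core.LOG_LEVEL_INFO)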
def register_Ns3Names_methods(root_module, cls):
## names.h (module 'core'): ns3::Names::Names() [constructor]
cls.add_constructor([])
## names.h (module 'core'): ns3::Names::Names(ns3::Names const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Names const &', 'arg0')])
## names.h (module 'core'): static void ns3::Names::Add(std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Add(std::string path, std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'path'), param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Add(ns3::Ptr<ns3::Object> context, std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Object >', 'context'), param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_static=True)
## names.h (module 'core'): static std::string ns3::Names::FindName(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('FindName',
'std::string',
[param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static std::string ns3::Names::FindPath(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('FindPath',
'std::string',
[param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Rename(std::string oldpath, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('std::string', 'oldpath'), param('std::string', 'newname')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Rename(std::string path, std::string oldname, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('std::string', 'path'), param('std::string', 'oldname'), param('std::string', 'newname')],
is_static=True)
## names.h (module 'core'): static void ns3::Names::Rename(ns3::Ptr<ns3::Object> context, std::string oldname, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('ns3::Ptr< ns3::Object >', 'context'), param('std::string', 'oldname'), param('std::string', 'newname')],
is_static=True)
return
def register_Ns3NonCopyable_methods(root_module, cls):
## non-copyable.h (module 'core'): ns3::NonCopyable::NonCopyable() [constructor]
cls.add_constructor([],
visibility='protected')
return
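# ns3::ObjectBase bindings: root of the attribute/trace system. Note the
# paired APIs: GetAttribute/SetAttribute are fatal on unknown names, while the
# *FailSafe variants return a bool instead; TraceConnect/TraceDisconnect come
# in with-context and without-context forms.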
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
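# ns3::ObjectFactory bindings: build objects by TypeId with pre-set
# attributes. A minimal sketch (the type and attribute names below are
# illustrative, not taken from this file):
#   factory = ns.core.ObjectFactory("ns3::ConstantRandomVariable")
#   factory.Set("Constant", ns.core.DoubleValue(1.5))
#   obj = factory.Create()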
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
def register_Ns3ParameterLogger_methods(root_module, cls):
## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(ns3::ParameterLogger const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ParameterLogger const &', 'arg0')])
## log.h (module 'core'): ns3::ParameterLogger::ParameterLogger(std::ostream & os) [constructor]
cls.add_constructor([param('std::ostream &', 'os')])
return
def register_Ns3RandomVariableStreamHelper_methods(root_module, cls):
## random-variable-stream-helper.h (module 'core'): ns3::RandomVariableStreamHelper::RandomVariableStreamHelper() [constructor]
cls.add_constructor([])
## random-variable-stream-helper.h (module 'core'): ns3::RandomVariableStreamHelper::RandomVariableStreamHelper(ns3::RandomVariableStreamHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomVariableStreamHelper const &', 'arg0')])
## random-variable-stream-helper.h (module 'core'): static int64_t ns3::RandomVariableStreamHelper::AssignStreams(std::string path, int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('std::string', 'path'), param('int64_t', 'stream')],
is_static=True)
return
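# ns3::RngSeedManager bindings: global seed and run-number control. The usual
# replication pattern keeps the seed fixed and varies only the run, e.g.:
#   ns.core.RngSeedManager.SetSeed(1)
#   ns.core.RngSeedManager.SetRun(7)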
def register_Ns3RngSeedManager_methods(root_module, cls):
## rng-seed-manager.h (module 'core'): ns3::RngSeedManager::RngSeedManager() [constructor]
cls.add_constructor([])
## rng-seed-manager.h (module 'core'): ns3::RngSeedManager::RngSeedManager(ns3::RngSeedManager const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RngSeedManager const &', 'arg0')])
## rng-seed-manager.h (module 'core'): static uint64_t ns3::RngSeedManager::GetNextStreamIndex() [member function]
cls.add_method('GetNextStreamIndex',
'uint64_t',
[],
is_static=True)
## rng-seed-manager.h (module 'core'): static uint64_t ns3::RngSeedManager::GetRun() [member function]
cls.add_method('GetRun',
'uint64_t',
[],
is_static=True)
## rng-seed-manager.h (module 'core'): static uint32_t ns3::RngSeedManager::GetSeed() [member function]
cls.add_method('GetSeed',
'uint32_t',
[],
is_static=True)
## rng-seed-manager.h (module 'core'): static void ns3::RngSeedManager::SetRun(uint64_t run) [member function]
cls.add_method('SetRun',
'void',
[param('uint64_t', 'run')],
is_static=True)
## rng-seed-manager.h (module 'core'): static void ns3::RngSeedManager::SetSeed(uint32_t seed) [member function]
cls.add_method('SetSeed',
'void',
[param('uint32_t', 'seed')],
is_static=True)
return
def register_Ns3RngStream_methods(root_module, cls):
## rng-stream.h (module 'core'): ns3::RngStream::RngStream(uint32_t seed, uint64_t stream, uint64_t substream) [constructor]
cls.add_constructor([param('uint32_t', 'seed'), param('uint64_t', 'stream'), param('uint64_t', 'substream')])
## rng-stream.h (module 'core'): ns3::RngStream::RngStream(ns3::RngStream const & r) [copy constructor]
cls.add_constructor([param('ns3::RngStream const &', 'r')])
## rng-stream.h (module 'core'): double ns3::RngStream::RandU01() [member function]
cls.add_method('RandU01',
'double',
[])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
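# ns3::Simulator bindings: the static scheduler facade. Run() and the
# Schedule() templates are not registered in this block; presumably they are
# wrapped by hand elsewhere in the bindings. What is exposed here supports,
# for example:
#   ns.core.Simulator.Stop(ns.core.Seconds(10.0))
#   now = ns.core.Simulator.Now()
#   ns.core.Simulator.Destroy()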
def register_Ns3Simulator_methods(root_module, cls):
## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
cls.add_method('GetImplementation',
'ns3::Ptr< ns3::SimulatorImpl >',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
cls.add_method('IsFinished',
'bool',
[],
is_static=True)
## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
cls.add_method('SetImplementation',
'void',
[param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_static=True)
## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_static=True)
return
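# ns3::Singleton<ns3::DesMetrics>: only the static Get() accessor is exposed
# here; the DesMetrics members themselves are registered further below.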
def register_Ns3Singleton__Ns3DesMetrics_methods(root_module, cls):
## singleton.h (module 'core'): ns3::Singleton<ns3::DesMetrics>::Singleton() [constructor]
cls.add_constructor([])
## singleton.h (module 'core'): static ns3::DesMetrics * ns3::Singleton<ns3::DesMetrics>::Get() [member function]
cls.add_method('Get',
'ns3::DesMetrics *',
[],
is_static=True)
return
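# Threading primitives: SystemCondition and SystemMutex are thin wrappers over
# ns-3's portable condition variable and mutex (note TimedWait takes its
# timeout in nanoseconds as a plain uint64_t).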
def register_Ns3SystemCondition_methods(root_module, cls):
## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition(ns3::SystemCondition const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemCondition const &', 'arg0')])
## system-condition.h (module 'core'): ns3::SystemCondition::SystemCondition() [constructor]
cls.add_constructor([])
## system-condition.h (module 'core'): void ns3::SystemCondition::Broadcast() [member function]
cls.add_method('Broadcast',
'void',
[])
## system-condition.h (module 'core'): bool ns3::SystemCondition::GetCondition() [member function]
cls.add_method('GetCondition',
'bool',
[])
## system-condition.h (module 'core'): void ns3::SystemCondition::SetCondition(bool condition) [member function]
cls.add_method('SetCondition',
'void',
[param('bool', 'condition')])
## system-condition.h (module 'core'): void ns3::SystemCondition::Signal() [member function]
cls.add_method('Signal',
'void',
[])
## system-condition.h (module 'core'): bool ns3::SystemCondition::TimedWait(uint64_t ns) [member function]
cls.add_method('TimedWait',
'bool',
[param('uint64_t', 'ns')])
## system-condition.h (module 'core'): void ns3::SystemCondition::Wait() [member function]
cls.add_method('Wait',
'void',
[])
return
def register_Ns3SystemMutex_methods(root_module, cls):
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex(ns3::SystemMutex const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemMutex const &', 'arg0')])
## system-mutex.h (module 'core'): ns3::SystemMutex::SystemMutex() [constructor]
cls.add_constructor([])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Lock() [member function]
cls.add_method('Lock',
'void',
[])
## system-mutex.h (module 'core'): void ns3::SystemMutex::Unlock() [member function]
cls.add_method('Unlock',
'void',
[])
return
def register_Ns3SystemWallClockMs_methods(root_module, cls):
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs(ns3::SystemWallClockMs const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemWallClockMs const &', 'arg0')])
## system-wall-clock-ms.h (module 'core'): ns3::SystemWallClockMs::SystemWallClockMs() [constructor]
cls.add_constructor([])
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::End() [member function]
cls.add_method('End',
'int64_t',
[])
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedReal() const [member function]
cls.add_method('GetElapsedReal',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedSystem() const [member function]
cls.add_method('GetElapsedSystem',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): int64_t ns3::SystemWallClockMs::GetElapsedUser() const [member function]
cls.add_method('GetElapsedUser',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h (module 'core'): void ns3::SystemWallClockMs::Start() [member function]
cls.add_method('Start',
'void',
[])
return
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
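# ns3::Timer bindings: a one-shot timer with explicit running/suspended/
# expired state. The template SetFunction/SetArguments members do not appear
# in this generated block. Illustrative sequence:
#   t = ns.core.Timer()
#   t.SetDelay(ns.core.Seconds(2.0))
#   t.Schedule()    # IsRunning() is now True until the delay elapses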
def register_Ns3Timer_methods(root_module, cls):
## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Timer const &', 'arg0')])
## timer.h (module 'core'): ns3::Timer::Timer() [constructor]
cls.add_constructor([])
## timer.h (module 'core'): ns3::Timer::Timer(ns3::Timer::DestroyPolicy destroyPolicy) [constructor]
cls.add_constructor([param('ns3::Timer::DestroyPolicy', 'destroyPolicy')])
## timer.h (module 'core'): void ns3::Timer::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelay() const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[],
is_const=True)
## timer.h (module 'core'): ns3::Time ns3::Timer::GetDelayLeft() const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[],
is_const=True)
## timer.h (module 'core'): ns3::Timer::State ns3::Timer::GetState() const [member function]
cls.add_method('GetState',
'ns3::Timer::State',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsExpired() const [member function]
cls.add_method('IsExpired',
'bool',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsRunning() const [member function]
cls.add_method('IsRunning',
'bool',
[],
is_const=True)
## timer.h (module 'core'): bool ns3::Timer::IsSuspended() const [member function]
cls.add_method('IsSuspended',
'bool',
[],
is_const=True)
## timer.h (module 'core'): void ns3::Timer::Remove() [member function]
cls.add_method('Remove',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Resume() [member function]
cls.add_method('Resume',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Schedule() [member function]
cls.add_method('Schedule',
'void',
[])
## timer.h (module 'core'): void ns3::Timer::Schedule(ns3::Time delay) [member function]
cls.add_method('Schedule',
'void',
[param('ns3::Time', 'delay')])
## timer.h (module 'core'): void ns3::Timer::SetDelay(ns3::Time const & delay) [member function]
cls.add_method('SetDelay',
'void',
[param('ns3::Time const &', 'delay')])
## timer.h (module 'core'): void ns3::Timer::Suspend() [member function]
cls.add_method('Suspend',
'void',
[])
return
def register_Ns3TimerImpl_methods(root_module, cls):
## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl() [constructor]
cls.add_constructor([])
## timer-impl.h (module 'core'): ns3::TimerImpl::TimerImpl(ns3::TimerImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimerImpl const &', 'arg0')])
## timer-impl.h (module 'core'): void ns3::TimerImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## timer-impl.h (module 'core'): ns3::EventId ns3::TimerImpl::Schedule(ns3::Time const & delay) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'delay')],
is_pure_virtual=True, is_virtual=True)
return
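# ns3::TypeId bindings: the run-time type metadata record. Registration covers
# the comparison operators, indexed attribute/trace-source introspection
# (GetAttributeN plus GetAttribute(i), and likewise for trace sources), and
# the fluent builders (AddAttribute, SetParent, SetGroupName, ...) that return
# the TypeId itself so calls can be chained.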
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SUPPORTED, std::string const & supportMsg="") [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'uid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
return
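# ns3::Vector2D / ns3::Vector3D: plain value types; x, y (and z) are exposed
# as mutable instance attributes rather than getter/setter methods.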
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
def register_Ns3Watchdog_methods(root_module, cls):
## watchdog.h (module 'core'): ns3::Watchdog::Watchdog(ns3::Watchdog const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Watchdog const &', 'arg0')])
## watchdog.h (module 'core'): ns3::Watchdog::Watchdog() [constructor]
cls.add_constructor([])
## watchdog.h (module 'core'): void ns3::Watchdog::Ping(ns3::Time delay) [member function]
cls.add_method('Ping',
'void',
[param('ns3::Time', 'delay')])
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
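# ns3::int64x64_t bindings: the fixed-point type underlying ns3::Time
# arithmetic. The comments cite int64x64-double.h, so this particular build
# appears to back the type with long double; precision then follows the
# platform's long double rather than a true 64.64 split.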
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
cls.add_constructor([param('long double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
def register_Ns3DesMetrics_methods(root_module, cls):
## des-metrics.h (module 'core'): ns3::DesMetrics::DesMetrics() [constructor]
cls.add_constructor([])
## des-metrics.h (module 'core'): void ns3::DesMetrics::Initialize(int argc, char * * argv, std::string outDir="") [member function]
cls.add_method('Initialize',
'void',
[param('int', 'argc'), param('char * *', 'argv'), param('std::string', 'outDir', default_value='""')])
## des-metrics.h (module 'core'): void ns3::DesMetrics::Trace(ns3::Time const & now, ns3::Time const & delay) [member function]
cls.add_method('Trace',
'void',
[param('ns3::Time const &', 'now'), param('ns3::Time const &', 'delay')])
## des-metrics.h (module 'core'): void ns3::DesMetrics::TraceWithContext(uint32_t context, ns3::Time const & now, ns3::Time const & delay) [member function]
cls.add_method('TraceWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'now'), param('ns3::Time const &', 'delay')])
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject(ns3::TypeId tid) const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[param('ns3::TypeId', 'tid')],
is_const=True, template_parameters=['ns3::Object'], custom_template_method_name=u'GetObject')
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
cls.add_method('IsInitialized',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
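# ns3::RandomVariableStream bindings: abstract base for all distributions.
# GetValue()/GetInteger() are pure virtual here; concrete classes such as
# SequentialRandomVariable (below) provide them. Peek() stays protected and
# returns the underlying RngStream.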
def register_Ns3RandomVariableStream_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function]
cls.add_method('SetStream',
'void',
[param('int64_t', 'stream')])
## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function]
cls.add_method('GetStream',
'int64_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function]
cls.add_method('SetAntithetic',
'void',
[param('bool', 'isAntithetic')])
## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function]
cls.add_method('IsAntithetic',
'bool',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function]
cls.add_method('Peek',
'ns3::RngStream *',
[],
is_const=True, visibility='protected')
return
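# ns3::Scheduler bindings: the abstract event-queue interface (Insert, Remove,
# PeekNext, RemoveNext). A concrete scheduler is installed at run time via
# Simulator::SetScheduler, registered above.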
def register_Ns3Scheduler_methods(root_module, cls):
## scheduler.h (module 'core'): ns3::Scheduler::Scheduler() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::Scheduler(ns3::Scheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Scheduler const &', 'arg0')])
## scheduler.h (module 'core'): static ns3::TypeId ns3::Scheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## scheduler.h (module 'core'): void ns3::Scheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## scheduler.h (module 'core'): bool ns3::Scheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## scheduler.h (module 'core'): ns3::Scheduler::Event ns3::Scheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## scheduler.h (module 'core'): void ns3::Scheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_pure_virtual=True, is_virtual=True)
## scheduler.h (module 'core'): ns3::Scheduler::Event ns3::Scheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3SchedulerEvent_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
## scheduler.h (module 'core'): ns3::Scheduler::Event::Event() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::Event::Event(ns3::Scheduler::Event const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Scheduler::Event const &', 'arg0')])
## scheduler.h (module 'core'): ns3::Scheduler::Event::impl [variable]
cls.add_instance_attribute('impl', 'ns3::EventImpl *', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::Event::key [variable]
cls.add_instance_attribute('key', 'ns3::Scheduler::EventKey', is_const=False)
return
def register_Ns3SchedulerEventKey_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::EventKey() [constructor]
cls.add_constructor([])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::EventKey(ns3::Scheduler::EventKey const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Scheduler::EventKey const &', 'arg0')])
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_context [variable]
cls.add_instance_attribute('m_context', 'uint32_t', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_ts [variable]
cls.add_instance_attribute('m_ts', 'uint64_t', is_const=False)
## scheduler.h (module 'core'): ns3::Scheduler::EventKey::m_uid [variable]
cls.add_instance_attribute('m_uid', 'uint32_t', is_const=False)
return
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function]
cls.add_method('GetIncrement',
'ns3::Ptr< ns3::RandomVariableStream >',
[],
is_const=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function]
cls.add_method('GetConsecutive',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
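# The register_Ns3SimpleRefCount__* functions below all register the same
# three members (default constructor, copy constructor, static Cleanup); the
# generator emits one function per C++ template instantiation, which is why
# the pattern repeats for every ref-counted base class.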
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3FdReader_Ns3Empty_Ns3DefaultDeleter__lt__ns3FdReader__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::FdReader, ns3::empty, ns3::DefaultDeleter< ns3::FdReader > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::FdReader, ns3::empty, ns3::DefaultDeleter<ns3::FdReader> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3RefCountBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3RefCountBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter< ns3::RefCountBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimulatorImpl_methods(root_module, cls):
## simulator-impl.h (module 'core'): ns3::SimulatorImpl::SimulatorImpl() [constructor]
cls.add_constructor([])
## simulator-impl.h (module 'core'): ns3::SimulatorImpl::SimulatorImpl(ns3::SimulatorImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SimulatorImpl const &', 'arg0')])
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): uint32_t ns3::SimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): uint32_t ns3::SimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): static ns3::TypeId ns3::SimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## simulator-impl.h (module 'core'): bool ns3::SimulatorImpl::IsExpired(ns3::EventId const & id) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): bool ns3::SimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::Time ns3::SimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::Schedule(ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): ns3::EventId ns3::SimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## simulator-impl.h (module 'core'): void ns3::SimulatorImpl::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_pure_virtual=True, is_virtual=True)
return
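# SimulatorImpl is an abstract interface (every method above is registered
# with is_pure_virtual=True); Python scripts normally drive it through the
# ns3::Simulator facade instead. A minimal sketch, assuming the bindings
# import as `ns.core` and that the build supports scheduling Python callables:
#
#   import ns.core
#   def hello():
#       print("fired at", ns.core.Simulator.Now().GetSeconds())
#   ns.core.Simulator.Schedule(ns.core.Seconds(1.0), hello)
#   ns.core.Simulator.Run()
#   ns.core.Simulator.Destroy()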
def register_Ns3Synchronizer_methods(root_module, cls):
## synchronizer.h (module 'core'): ns3::Synchronizer::Synchronizer(ns3::Synchronizer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Synchronizer const &', 'arg0')])
## synchronizer.h (module 'core'): ns3::Synchronizer::Synchronizer() [constructor]
cls.add_constructor([])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::EventEnd() [member function]
cls.add_method('EventEnd',
'uint64_t',
[])
## synchronizer.h (module 'core'): void ns3::Synchronizer::EventStart() [member function]
cls.add_method('EventStart',
'void',
[])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::GetCurrentRealtime() [member function]
cls.add_method('GetCurrentRealtime',
'uint64_t',
[])
## synchronizer.h (module 'core'): int64_t ns3::Synchronizer::GetDrift(uint64_t ts) [member function]
cls.add_method('GetDrift',
'int64_t',
[param('uint64_t', 'ts')])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::GetOrigin() [member function]
cls.add_method('GetOrigin',
'uint64_t',
[])
## synchronizer.h (module 'core'): static ns3::TypeId ns3::Synchronizer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## synchronizer.h (module 'core'): bool ns3::Synchronizer::Realtime() [member function]
cls.add_method('Realtime',
'bool',
[])
## synchronizer.h (module 'core'): void ns3::Synchronizer::SetCondition(bool arg0) [member function]
cls.add_method('SetCondition',
'void',
[param('bool', 'arg0')])
## synchronizer.h (module 'core'): void ns3::Synchronizer::SetOrigin(uint64_t ts) [member function]
cls.add_method('SetOrigin',
'void',
[param('uint64_t', 'ts')])
## synchronizer.h (module 'core'): void ns3::Synchronizer::Signal() [member function]
cls.add_method('Signal',
'void',
[])
## synchronizer.h (module 'core'): bool ns3::Synchronizer::Synchronize(uint64_t tsCurrent, uint64_t tsDelay) [member function]
cls.add_method('Synchronize',
'bool',
[param('uint64_t', 'tsCurrent'), param('uint64_t', 'tsDelay')])
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::DoEventEnd() [member function]
cls.add_method('DoEventEnd',
'uint64_t',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoEventStart() [member function]
cls.add_method('DoEventStart',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): uint64_t ns3::Synchronizer::DoGetCurrentRealtime() [member function]
cls.add_method('DoGetCurrentRealtime',
'uint64_t',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): int64_t ns3::Synchronizer::DoGetDrift(uint64_t ns) [member function]
cls.add_method('DoGetDrift',
'int64_t',
[param('uint64_t', 'ns')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): bool ns3::Synchronizer::DoRealtime() [member function]
cls.add_method('DoRealtime',
'bool',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoSetCondition(bool arg0) [member function]
cls.add_method('DoSetCondition',
'void',
[param('bool', 'arg0')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoSetOrigin(uint64_t ns) [member function]
cls.add_method('DoSetOrigin',
'void',
[param('uint64_t', 'ns')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): void ns3::Synchronizer::DoSignal() [member function]
cls.add_method('DoSignal',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## synchronizer.h (module 'core'): bool ns3::Synchronizer::DoSynchronize(uint64_t nsCurrent, uint64_t nsDelay) [member function]
cls.add_method('DoSynchronize',
'bool',
[param('uint64_t', 'nsCurrent'), param('uint64_t', 'nsDelay')],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
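# Synchronizer follows a template-method design: the public EventStart(),
# Synchronize(), etc. are concrete, while the protected Do*() methods above
# are pure-virtual hooks that concrete subclasses (such as
# WallClockSynchronizer, registered later in this file) must override.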
def register_Ns3SystemThread_methods(root_module, cls):
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
## system-thread.h (module 'core'): ns3::SystemThread::SystemThread(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [constructor]
cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## system-thread.h (module 'core'): static bool ns3::SystemThread::Equals(pthread_t id) [member function]
cls.add_method('Equals',
'bool',
[param('pthread_t', 'id')],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Join() [member function]
cls.add_method('Join',
'void',
[])
## system-thread.h (module 'core'): static pthread_t ns3::SystemThread::Self() [member function]
cls.add_method('Self',
'pthread_t',
[],
is_static=True)
## system-thread.h (module 'core'): void ns3::SystemThread::Start() [member function]
cls.add_method('Start',
'void',
[])
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
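# The operator registrations at the top of register_Ns3Time_methods make
# ns3::Time behave like a numeric type in Python. A minimal sketch, assuming
# the bindings import as `ns.core`:
#
#   import ns.core
#   t = ns.core.Seconds(1.5) + ns.core.MilliSeconds(250)
#   t.GetSeconds()                # 1.75
#   t.GetMilliSeconds()           # 1750
#   ns.core.Seconds(2.0) > t      # True; comparison operators registered above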
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3UniformRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
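# A minimal usage sketch for the random-variable bindings, assuming the
# bindings import as `ns.core` ("Min"/"Max" are the standard ns-3 attribute
# names for this class):
#
#   import ns.core
#   rng = ns.core.UniformRandomVariable()
#   rng.SetAttribute("Min", ns.core.DoubleValue(0.0))
#   rng.SetAttribute("Max", ns.core.DoubleValue(10.0))
#   x = rng.GetValue()            # draw from [0, 10)
#   k = rng.GetInteger(1, 6)      # bounded integer draw (overload above)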
def register_Ns3WallClockSynchronizer_methods(root_module, cls):
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::WallClockSynchronizer(ns3::WallClockSynchronizer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::WallClockSynchronizer const &', 'arg0')])
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::WallClockSynchronizer() [constructor]
cls.add_constructor([])
## wall-clock-synchronizer.h (module 'core'): static ns3::TypeId ns3::WallClockSynchronizer::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::NS_PER_SEC [variable]
cls.add_static_attribute('NS_PER_SEC', 'uint64_t const', is_const=True)
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::US_PER_NS [variable]
cls.add_static_attribute('US_PER_NS', 'uint64_t const', is_const=True)
## wall-clock-synchronizer.h (module 'core'): ns3::WallClockSynchronizer::US_PER_SEC [variable]
cls.add_static_attribute('US_PER_SEC', 'uint64_t const', is_const=True)
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::DoEventEnd() [member function]
cls.add_method('DoEventEnd',
'uint64_t',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoEventStart() [member function]
cls.add_method('DoEventStart',
'void',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::DoGetCurrentRealtime() [member function]
cls.add_method('DoGetCurrentRealtime',
'uint64_t',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): int64_t ns3::WallClockSynchronizer::DoGetDrift(uint64_t ns) [member function]
cls.add_method('DoGetDrift',
'int64_t',
[param('uint64_t', 'ns')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::DoRealtime() [member function]
cls.add_method('DoRealtime',
'bool',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoSetCondition(bool cond) [member function]
cls.add_method('DoSetCondition',
'void',
[param('bool', 'cond')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoSetOrigin(uint64_t ns) [member function]
cls.add_method('DoSetOrigin',
'void',
[param('uint64_t', 'ns')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::DoSignal() [member function]
cls.add_method('DoSignal',
'void',
[],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::DoSynchronize(uint64_t nsCurrent, uint64_t nsDelay) [member function]
cls.add_method('DoSynchronize',
'bool',
[param('uint64_t', 'nsCurrent'), param('uint64_t', 'nsDelay')],
visibility='protected', is_virtual=True)
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::DriftCorrect(uint64_t nsNow, uint64_t nsDelay) [member function]
cls.add_method('DriftCorrect',
'uint64_t',
[param('uint64_t', 'nsNow'), param('uint64_t', 'nsDelay')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::GetNormalizedRealtime() [member function]
cls.add_method('GetNormalizedRealtime',
'uint64_t',
[],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::GetRealtime() [member function]
cls.add_method('GetRealtime',
'uint64_t',
[],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::NsToTimeval(int64_t ns, timeval * tv) [member function]
cls.add_method('NsToTimeval',
'void',
[param('int64_t', 'ns'), param('timeval *', 'tv')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::SleepWait(uint64_t ns) [member function]
cls.add_method('SleepWait',
'bool',
[param('uint64_t', 'ns')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): bool ns3::WallClockSynchronizer::SpinWait(uint64_t ns) [member function]
cls.add_method('SpinWait',
'bool',
[param('uint64_t', 'ns')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): void ns3::WallClockSynchronizer::TimevalAdd(timeval * tv1, timeval * tv2, timeval * result) [member function]
cls.add_method('TimevalAdd',
'void',
[param('timeval *', 'tv1'), param('timeval *', 'tv2'), param('timeval *', 'result')],
visibility='protected')
## wall-clock-synchronizer.h (module 'core'): uint64_t ns3::WallClockSynchronizer::TimevalToNs(timeval * tv) [member function]
cls.add_method('TimevalToNs',
'uint64_t',
[param('timeval *', 'tv')],
visibility='protected')
return
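# WallClockSynchronizer implements every Do*() hook of Synchronizer against
# the host's wall clock; it is what the realtime scheduler uses to pace the
# simulation. A hedged sketch of how a script would opt in to realtime mode
# (assuming the bindings import as `ns.core`):
#
#   import ns.core
#   ns.core.GlobalValue.Bind("SimulatorImplementationType",
#                            ns.core.StringValue("ns3::RealtimeSimulatorImpl"))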
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'n'), param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'n'), param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
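# The remaining RandomVariableStream subclasses in this file (Zeta and Zipf
# above; Constant, Deterministic, and Empirical below) follow the same shape:
# parameterized GetValue()/GetInteger() overloads plus the zero-argument
# virtual pair inherited from the base class, so the usage sketch given after
# register_Ns3UniformRandomVariable_methods applies to all of them.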
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3BooleanChecker_methods(root_module, cls):
## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker() [constructor]
cls.add_constructor([])
## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker(ns3::BooleanChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
return
def register_Ns3BooleanValue_methods(root_module, cls):
cls.add_output_stream_operator()
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(ns3::BooleanValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue() [constructor]
cls.add_constructor([])
## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(bool value) [constructor]
cls.add_constructor([param('bool', 'value')])
## boolean.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::BooleanValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## boolean.h (module 'core'): bool ns3::BooleanValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## boolean.h (module 'core'): bool ns3::BooleanValue::Get() const [member function]
cls.add_method('Get',
'bool',
[],
is_const=True)
## boolean.h (module 'core'): std::string ns3::BooleanValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## boolean.h (module 'core'): void ns3::BooleanValue::Set(bool value) [member function]
cls.add_method('Set',
'void',
[param('bool', 'value')])
return
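# A minimal sketch of the AttributeValue round-trip these methods enable,
# assuming the bindings import as `ns.core`; the checker-factory name
# MakeBooleanChecker is an assumption about what the module exposes:
#
#   import ns.core
#   checker = ns.core.MakeBooleanChecker()
#   v = ns.core.BooleanValue(True)
#   v.SerializeToString(checker)              # "true"
#   v.DeserializeFromString("false", checker)
#   v.Get()                                   # False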
def register_Ns3CalendarScheduler_methods(root_module, cls):
## calendar-scheduler.h (module 'core'): ns3::CalendarScheduler::CalendarScheduler(ns3::CalendarScheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CalendarScheduler const &', 'arg0')])
## calendar-scheduler.h (module 'core'): ns3::CalendarScheduler::CalendarScheduler() [constructor]
cls.add_constructor([])
## calendar-scheduler.h (module 'core'): static ns3::TypeId ns3::CalendarScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## calendar-scheduler.h (module 'core'): void ns3::CalendarScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## calendar-scheduler.h (module 'core'): bool ns3::CalendarScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## calendar-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::CalendarScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## calendar-scheduler.h (module 'core'): void ns3::CalendarScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## calendar-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::CalendarScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function]
cls.add_method('GetConstant',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'constant')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'constant')])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3DefaultSimulatorImpl_methods(root_module, cls):
## default-simulator-impl.h (module 'core'): ns3::DefaultSimulatorImpl::DefaultSimulatorImpl(ns3::DefaultSimulatorImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DefaultSimulatorImpl const &', 'arg0')])
## default-simulator-impl.h (module 'core'): ns3::DefaultSimulatorImpl::DefaultSimulatorImpl() [constructor]
cls.add_constructor([])
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Cancel(ns3::EventId const & id) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'id')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_virtual=True)
## default-simulator-impl.h (module 'core'): uint32_t ns3::DefaultSimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::Time ns3::DefaultSimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::Time ns3::DefaultSimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): uint32_t ns3::DefaultSimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): static ns3::TypeId ns3::DefaultSimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## default-simulator-impl.h (module 'core'): bool ns3::DefaultSimulatorImpl::IsExpired(ns3::EventId const & id) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'id')],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): bool ns3::DefaultSimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::Time ns3::DefaultSimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Remove(ns3::EventId const & id) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'id')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::EventId ns3::DefaultSimulatorImpl::Schedule(ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::EventId ns3::DefaultSimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): ns3::EventId ns3::DefaultSimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_virtual=True)
## default-simulator-impl.h (module 'core'): void ns3::DefaultSimulatorImpl::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function]
cls.add_method('SetValueArray',
'void',
[param('double *', 'values'), param('uint64_t', 'length')])
## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3DoubleValue_methods(root_module, cls):
## double.h (module 'core'): ns3::DoubleValue::DoubleValue() [constructor]
cls.add_constructor([])
## double.h (module 'core'): ns3::DoubleValue::DoubleValue(ns3::DoubleValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
## double.h (module 'core'): ns3::DoubleValue::DoubleValue(double const & value) [constructor]
cls.add_constructor([param('double const &', 'value')])
## double.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::DoubleValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## double.h (module 'core'): bool ns3::DoubleValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## double.h (module 'core'): double ns3::DoubleValue::Get() const [member function]
cls.add_method('Get',
'double',
[],
is_const=True)
## double.h (module 'core'): std::string ns3::DoubleValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## double.h (module 'core'): void ns3::DoubleValue::Set(double const & value) [member function]
cls.add_method('Set',
'void',
[param('double const &', 'value')])
return
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function]
cls.add_method('CDF',
'void',
[param('double', 'v'), param('double', 'c')])
## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double c1, double c2, double v1, double v2, double r) [member function]
cls.add_method('Interpolate',
'double',
[param('double', 'c1'), param('double', 'c2'), param('double', 'v1'), param('double', 'v2'), param('double', 'r')],
visibility='private', is_virtual=True)
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function]
cls.add_method('Validate',
'void',
[],
visibility='private', is_virtual=True)
return
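# Illustrative sketch, not part of the generated registrations: builds an
# empirical distribution through the CDF() method bound above and draws a
# sample from it. The 'ns.core' import path is an assumption about a built
# ns-3 tree; the CDF points themselves are made-up example data.
def _example_empirical_random_variable_usage():
    import ns.core  # assumed import path of the built bindings
    erv = ns.core.EmpiricalRandomVariable()
    erv.CDF(0.0, 0.0)    # value 0.0 has cumulative probability 0.0
    erv.CDF(5.0, 0.5)    # half of the mass lies at or below 5.0
    erv.CDF(10.0, 1.0)   # the CDF must reach 1.0 at the largest value
    return erv.GetValue()  # one sample from the empirical distribution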
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor(ns3::EmptyAttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor::EmptyAttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker(ns3::EmptyAttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker::EmptyAttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EnumChecker_methods(root_module, cls):
## enum.h (module 'core'): ns3::EnumChecker::EnumChecker(ns3::EnumChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
## enum.h (module 'core'): ns3::EnumChecker::EnumChecker() [constructor]
cls.add_constructor([])
## enum.h (module 'core'): void ns3::EnumChecker::Add(int value, std::string name) [member function]
cls.add_method('Add',
'void',
[param('int', 'value'), param('std::string', 'name')])
## enum.h (module 'core'): void ns3::EnumChecker::AddDefault(int value, std::string name) [member function]
cls.add_method('AddDefault',
'void',
[param('int', 'value'), param('std::string', 'name')])
## enum.h (module 'core'): bool ns3::EnumChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumChecker::Copy(ns3::AttributeValue const & src, ns3::AttributeValue & dst) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): std::string ns3::EnumChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): std::string ns3::EnumChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EnumValue_methods(root_module, cls):
## enum.h (module 'core'): ns3::EnumValue::EnumValue(ns3::EnumValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
## enum.h (module 'core'): ns3::EnumValue::EnumValue() [constructor]
cls.add_constructor([])
## enum.h (module 'core'): ns3::EnumValue::EnumValue(int value) [constructor]
cls.add_constructor([param('int', 'value')])
## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## enum.h (module 'core'): bool ns3::EnumValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## enum.h (module 'core'): int ns3::EnumValue::Get() const [member function]
cls.add_method('Get',
'int',
[],
is_const=True)
## enum.h (module 'core'): std::string ns3::EnumValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## enum.h (module 'core'): void ns3::EnumValue::Set(int value) [member function]
cls.add_method('Set',
'void',
[param('int', 'value')])
return
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function]
cls.add_method('GetK',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'k'), param('double', 'lambda')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'k'), param('uint32_t', 'lambda')])
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3EventImpl_methods(root_module, cls):
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
cls.add_constructor([])
## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
cls.add_method('Cancel',
'void',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
cls.add_method('Invoke',
'void',
[])
## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
cls.add_method('IsCancelled',
'bool',
[])
## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
cls.add_method('Notify',
'void',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
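# Illustrative sketch, not part of the generated registrations: configures an
# ns3::ExponentialRandomVariable through its 'Mean' and 'Bound' attributes
# (names from the C++ class) and draws from it, assuming the bindings are
# importable as 'ns.core'.
def _example_exponential_random_variable_usage():
    import ns.core  # assumed import path of the built bindings
    erv = ns.core.ExponentialRandomVariable()
    erv.SetAttribute("Mean", ns.core.DoubleValue(5.0))
    erv.SetAttribute("Bound", ns.core.DoubleValue(50.0))  # truncate the tail
    return erv.GetValue()  # or erv.GetValue(5.0, 50.0) to bypass the attributes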
def register_Ns3FdReader_methods(root_module, cls):
## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader(ns3::FdReader const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FdReader const &', 'arg0')])
## unix-fd-reader.h (module 'core'): ns3::FdReader::FdReader() [constructor]
cls.add_constructor([])
## unix-fd-reader.h (module 'core'): void ns3::FdReader::Start(int fd, ns3::Callback<void, unsigned char*, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> readCallback) [member function]
cls.add_method('Start',
'void',
[param('int', 'fd'), param('ns3::Callback< void, unsigned char *, int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'readCallback')])
## unix-fd-reader.h (module 'core'): void ns3::FdReader::Stop() [member function]
cls.add_method('Stop',
'void',
[])
## unix-fd-reader.h (module 'core'): ns3::FdReader::Data ns3::FdReader::DoRead() [member function]
cls.add_method('DoRead',
'ns3::FdReader::Data',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
return
def register_Ns3GammaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function]
cls.add_method('GetBeta',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha'), param('double', 'beta')])
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3HeapScheduler_methods(root_module, cls):
## heap-scheduler.h (module 'core'): ns3::HeapScheduler::HeapScheduler(ns3::HeapScheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::HeapScheduler const &', 'arg0')])
## heap-scheduler.h (module 'core'): ns3::HeapScheduler::HeapScheduler() [constructor]
cls.add_constructor([])
## heap-scheduler.h (module 'core'): static ns3::TypeId ns3::HeapScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## heap-scheduler.h (module 'core'): void ns3::HeapScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## heap-scheduler.h (module 'core'): bool ns3::HeapScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## heap-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::HeapScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## heap-scheduler.h (module 'core'): void ns3::HeapScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## heap-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::HeapScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
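# Illustrative sketch, not part of the generated registrations: installs the
# heap-based event scheduler bound above via ns3::Simulator::SetScheduler,
# which takes the ObjectFactory seen in the Schedule signatures. Assumes
# 'ns.core' is the import path of the built bindings.
def _example_heap_scheduler_selection():
    import ns.core  # assumed import path of the built bindings
    factory = ns.core.ObjectFactory()
    factory.SetTypeId("ns3::HeapScheduler")
    ns.core.Simulator.SetScheduler(factory)  # call before scheduling events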
def register_Ns3IntegerValue_methods(root_module, cls):
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue() [constructor]
cls.add_constructor([])
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(ns3::IntegerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(int64_t const & value) [constructor]
cls.add_constructor([param('int64_t const &', 'value')])
## integer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::IntegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## integer.h (module 'core'): bool ns3::IntegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## integer.h (module 'core'): int64_t ns3::IntegerValue::Get() const [member function]
cls.add_method('Get',
'int64_t',
[],
is_const=True)
## integer.h (module 'core'): std::string ns3::IntegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## integer.h (module 'core'): void ns3::IntegerValue::Set(int64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('int64_t const &', 'value')])
return
def register_Ns3ListScheduler_methods(root_module, cls):
## list-scheduler.h (module 'core'): ns3::ListScheduler::ListScheduler(ns3::ListScheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ListScheduler const &', 'arg0')])
## list-scheduler.h (module 'core'): ns3::ListScheduler::ListScheduler() [constructor]
cls.add_constructor([])
## list-scheduler.h (module 'core'): static ns3::TypeId ns3::ListScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## list-scheduler.h (module 'core'): void ns3::ListScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## list-scheduler.h (module 'core'): bool ns3::ListScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## list-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::ListScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## list-scheduler.h (module 'core'): void ns3::ListScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## list-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::ListScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
cls.add_method('GetMu',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
cls.add_method('GetSigma',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mu'), param('double', 'sigma')])
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3MapScheduler_methods(root_module, cls):
## map-scheduler.h (module 'core'): ns3::MapScheduler::MapScheduler(ns3::MapScheduler const & arg0) [copy constructor]
cls.add_constructor([param('ns3::MapScheduler const &', 'arg0')])
## map-scheduler.h (module 'core'): ns3::MapScheduler::MapScheduler() [constructor]
cls.add_constructor([])
## map-scheduler.h (module 'core'): static ns3::TypeId ns3::MapScheduler::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## map-scheduler.h (module 'core'): void ns3::MapScheduler::Insert(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Insert',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## map-scheduler.h (module 'core'): bool ns3::MapScheduler::IsEmpty() const [member function]
cls.add_method('IsEmpty',
'bool',
[],
is_const=True, is_virtual=True)
## map-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::MapScheduler::PeekNext() const [member function]
cls.add_method('PeekNext',
'ns3::Scheduler::Event',
[],
is_const=True, is_virtual=True)
## map-scheduler.h (module 'core'): void ns3::MapScheduler::Remove(ns3::Scheduler::Event const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::Scheduler::Event const &', 'ev')],
is_virtual=True)
## map-scheduler.h (module 'core'): ns3::Scheduler::Event ns3::MapScheduler::RemoveNext() [member function]
cls.add_method('RemoveNext',
'ns3::Scheduler::Event',
[],
is_virtual=True)
return
def register_Ns3NormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
cls.add_method('GetVariance',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
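# Illustrative sketch, not part of the generated registrations: draws from
# ns3::NormalRandomVariable using the explicit-parameter overload bound above.
# The third argument defaults to NormalRandomVariable::INFINITE_VALUE, i.e. no
# truncation. 'ns.core' is an assumed import path.
def _example_normal_random_variable_usage():
    import ns.core  # assumed import path of the built bindings
    nrv = ns.core.NormalRandomVariable()
    return nrv.GetValue(0.0, 1.0)  # mean 0, variance 1, unbounded by default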
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3ObjectPtrContainerAccessor_methods(root_module, cls):
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerAccessor::ObjectPtrContainerAccessor() [constructor]
cls.add_constructor([])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerAccessor::ObjectPtrContainerAccessor(ns3::ObjectPtrContainerAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectPtrContainerAccessor const &', 'arg0')])
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & value) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'value')],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectPtrContainerAccessor::DoGet(ns3::ObjectBase const * object, uint32_t i, uint32_t * index) const [member function]
cls.add_method('DoGet',
'ns3::Ptr< ns3::Object >',
[param('ns3::ObjectBase const *', 'object'), param('uint32_t', 'i'), param('uint32_t *', 'index')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerAccessor::DoGetN(ns3::ObjectBase const * object, uint32_t * n) const [member function]
cls.add_method('DoGetN',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('uint32_t *', 'n')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3ObjectPtrContainerChecker_methods(root_module, cls):
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerChecker::ObjectPtrContainerChecker() [constructor]
cls.add_constructor([])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerChecker::ObjectPtrContainerChecker(ns3::ObjectPtrContainerChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectPtrContainerChecker const &', 'arg0')])
## object-ptr-container.h (module 'core'): ns3::TypeId ns3::ObjectPtrContainerChecker::GetItemTypeId() const [member function]
cls.add_method('GetItemTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3ObjectPtrContainerValue_methods(root_module, cls):
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue::ObjectPtrContainerValue(ns3::ObjectPtrContainerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectPtrContainerValue const &', 'arg0')])
## object-ptr-container.h (module 'core'): ns3::ObjectPtrContainerValue::ObjectPtrContainerValue() [constructor]
cls.add_constructor([])
## object-ptr-container.h (module 'core'): std::_Rb_tree_const_iterator<std::pair<const unsigned int, ns3::Ptr<ns3::Object> > > ns3::ObjectPtrContainerValue::Begin() const [member function]
cls.add_method('Begin',
'std::_Rb_tree_const_iterator< std::pair< unsigned int const, ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## object-ptr-container.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectPtrContainerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-ptr-container.h (module 'core'): bool ns3::ObjectPtrContainerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-ptr-container.h (module 'core'): std::_Rb_tree_const_iterator<std::pair<const unsigned int, ns3::Ptr<ns3::Object> > > ns3::ObjectPtrContainerValue::End() const [member function]
cls.add_method('End',
'std::_Rb_tree_const_iterator< std::pair< unsigned int const, ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## object-ptr-container.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectPtrContainerValue::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')],
is_const=True)
## object-ptr-container.h (module 'core'): uint32_t ns3::ObjectPtrContainerValue::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## object-ptr-container.h (module 'core'): std::string ns3::ObjectPtrContainerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
return
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
deprecated=True, is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3PointerChecker_methods(root_module, cls):
## pointer.h (module 'core'): ns3::PointerChecker::PointerChecker() [constructor]
cls.add_constructor([])
## pointer.h (module 'core'): ns3::PointerChecker::PointerChecker(ns3::PointerChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PointerChecker const &', 'arg0')])
## pointer.h (module 'core'): ns3::TypeId ns3::PointerChecker::GetPointeeTypeId() const [member function]
cls.add_method('GetPointeeTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3PointerValue_methods(root_module, cls):
## pointer.h (module 'core'): ns3::PointerValue::PointerValue(ns3::PointerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PointerValue const &', 'arg0')])
## pointer.h (module 'core'): ns3::PointerValue::PointerValue() [constructor]
cls.add_constructor([])
## pointer.h (module 'core'): ns3::PointerValue::PointerValue(ns3::Ptr<ns3::Object> object) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Object >', 'object')])
## pointer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::PointerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## pointer.h (module 'core'): bool ns3::PointerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## pointer.h (module 'core'): ns3::Ptr<ns3::Object> ns3::PointerValue::GetObject() const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## pointer.h (module 'core'): std::string ns3::PointerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## pointer.h (module 'core'): void ns3::PointerValue::SetObject(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('SetObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'object')])
return
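# Illustrative sketch, not part of the generated registrations: wraps an
# ns3::Object in the PointerValue bound above, as one would when assigning a
# pointer-typed attribute. The factory type name is a hypothetical example;
# 'ns.core' is an assumed import path.
def _example_pointer_value_usage():
    import ns.core  # assumed import path of the built bindings
    factory = ns.core.ObjectFactory()
    factory.SetTypeId("ns3::HeapScheduler")  # hypothetical pointee type
    obj = factory.Create()                   # Ptr<Object>
    pv = ns.core.PointerValue(obj)
    return pv.GetObject()                    # same underlying Ptr<Object>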
def register_Ns3RealtimeSimulatorImpl_methods(root_module, cls):
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::RealtimeSimulatorImpl(ns3::RealtimeSimulatorImpl const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RealtimeSimulatorImpl const &', 'arg0')])
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::RealtimeSimulatorImpl() [constructor]
cls.add_constructor([])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Cancel(ns3::EventId const & ev) [member function]
cls.add_method('Cancel',
'void',
[param('ns3::EventId const &', 'ev')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Destroy() [member function]
cls.add_method('Destroy',
'void',
[],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): uint32_t ns3::RealtimeSimulatorImpl::GetContext() const [member function]
cls.add_method('GetContext',
'uint32_t',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::GetDelayLeft(ns3::EventId const & id) const [member function]
cls.add_method('GetDelayLeft',
'ns3::Time',
[param('ns3::EventId const &', 'id')],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::GetHardLimit() const [member function]
cls.add_method('GetHardLimit',
'ns3::Time',
[],
is_const=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::GetMaximumSimulationTime() const [member function]
cls.add_method('GetMaximumSimulationTime',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::RealtimeSimulatorImpl::SynchronizationMode ns3::RealtimeSimulatorImpl::GetSynchronizationMode() const [member function]
cls.add_method('GetSynchronizationMode',
'ns3::RealtimeSimulatorImpl::SynchronizationMode',
[],
is_const=True)
## realtime-simulator-impl.h (module 'core'): uint32_t ns3::RealtimeSimulatorImpl::GetSystemId() const [member function]
cls.add_method('GetSystemId',
'uint32_t',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): static ns3::TypeId ns3::RealtimeSimulatorImpl::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## realtime-simulator-impl.h (module 'core'): bool ns3::RealtimeSimulatorImpl::IsExpired(ns3::EventId const & ev) const [member function]
cls.add_method('IsExpired',
'bool',
[param('ns3::EventId const &', 'ev')],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): bool ns3::RealtimeSimulatorImpl::IsFinished() const [member function]
cls.add_method('IsFinished',
'bool',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::Now() const [member function]
cls.add_method('Now',
'ns3::Time',
[],
is_const=True, is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::Time ns3::RealtimeSimulatorImpl::RealtimeNow() const [member function]
cls.add_method('RealtimeNow',
'ns3::Time',
[],
is_const=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Remove(ns3::EventId const & ev) [member function]
cls.add_method('Remove',
'void',
[param('ns3::EventId const &', 'ev')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Run() [member function]
cls.add_method('Run',
'void',
[],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::EventId ns3::RealtimeSimulatorImpl::Schedule(ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('Schedule',
'ns3::EventId',
[param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::EventId ns3::RealtimeSimulatorImpl::ScheduleDestroy(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleDestroy',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): ns3::EventId ns3::RealtimeSimulatorImpl::ScheduleNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleNow',
'ns3::EventId',
[param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtime(ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtime',
'void',
[param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtimeNow(ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtimeNow',
'void',
[param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtimeNowWithContext(uint32_t context, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtimeNowWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleRealtimeWithContext(uint32_t context, ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleRealtimeWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::ScheduleWithContext(uint32_t context, ns3::Time const & delay, ns3::EventImpl * event) [member function]
cls.add_method('ScheduleWithContext',
'void',
[param('uint32_t', 'context'), param('ns3::Time const &', 'delay'), param('ns3::EventImpl *', 'event')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::SetHardLimit(ns3::Time limit) [member function]
cls.add_method('SetHardLimit',
'void',
[param('ns3::Time', 'limit')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
cls.add_method('SetScheduler',
'void',
[param('ns3::ObjectFactory', 'schedulerFactory')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::SetSynchronizationMode(ns3::RealtimeSimulatorImpl::SynchronizationMode mode) [member function]
cls.add_method('SetSynchronizationMode',
'void',
[param('ns3::RealtimeSimulatorImpl::SynchronizationMode', 'mode')])
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Stop() [member function]
cls.add_method('Stop',
'void',
[],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::Stop(ns3::Time const & delay) [member function]
cls.add_method('Stop',
'void',
[param('ns3::Time const &', 'delay')],
is_virtual=True)
## realtime-simulator-impl.h (module 'core'): void ns3::RealtimeSimulatorImpl::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
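# Illustrative sketch, not part of the generated registrations: selects the
# real-time simulator implementation bound above by binding the global
# 'SimulatorImplementationType' value, the usual way to swap implementations.
# Assumes 'ns.core' is importable from a built ns-3 tree.
def _example_select_realtime_simulator():
    import ns.core  # assumed import path of the built bindings
    ns.core.GlobalValue.Bind("SimulatorImplementationType",
                             ns.core.StringValue("ns3::RealtimeSimulatorImpl"))
    # From here on, Simulator.Run() paces simulated time against the wall clock.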
def register_Ns3RefCountBase_methods(root_module, cls):
## ref-count-base.h (module 'core'): ns3::RefCountBase::RefCountBase() [constructor]
cls.add_constructor([])
## ref-count-base.h (module 'core'): ns3::RefCountBase::RefCountBase(ns3::RefCountBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RefCountBase const &', 'arg0')])
return
def register_Ns3StringChecker_methods(root_module, cls):
## string.h (module 'core'): ns3::StringChecker::StringChecker() [constructor]
cls.add_constructor([])
## string.h (module 'core'): ns3::StringChecker::StringChecker(ns3::StringChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::StringChecker const &', 'arg0')])
return
def register_Ns3StringValue_methods(root_module, cls):
## string.h (module 'core'): ns3::StringValue::StringValue() [constructor]
cls.add_constructor([])
## string.h (module 'core'): ns3::StringValue::StringValue(ns3::StringValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::StringValue const &', 'arg0')])
## string.h (module 'core'): ns3::StringValue::StringValue(std::string const & value) [constructor]
cls.add_constructor([param('std::string const &', 'value')])
## string.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::StringValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## string.h (module 'core'): bool ns3::StringValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## string.h (module 'core'): std::string ns3::StringValue::Get() const [member function]
cls.add_method('Get',
'std::string',
[],
is_const=True)
## string.h (module 'core'): std::string ns3::StringValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## string.h (module 'core'): void ns3::StringValue::Set(std::string const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string const &', 'value')])
return
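# Illustrative sketch, not part of the generated registrations: StringValue is
# the catch-all attribute value type; ns-3 deserializes the string into the
# attribute's native type on assignment. 'ns.core' is an assumed import path.
def _example_string_value_usage():
    import ns.core  # assumed import path of the built bindings
    sv = ns.core.StringValue("ns3::RealtimeSimulatorImpl")
    assert sv.Get() == "ns3::RealtimeSimulatorImpl"
    return sv.Copy()  # Ptr<AttributeValue> deep copy, per the binding above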
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3UintegerValue_methods(root_module, cls):
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue() [constructor]
cls.add_constructor([])
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(ns3::UintegerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(uint64_t const & value) [constructor]
cls.add_constructor([param('uint64_t const &', 'value')])
## uinteger.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::UintegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## uinteger.h (module 'core'): bool ns3::UintegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## uinteger.h (module 'core'): uint64_t ns3::UintegerValue::Get() const [member function]
cls.add_method('Get',
'uint64_t',
[],
is_const=True)
## uinteger.h (module 'core'): std::string ns3::UintegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## uinteger.h (module 'core'): void ns3::UintegerValue::Set(uint64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('uint64_t const &', 'value')])
return
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
def register_Ns3ConfigMatchContainer_methods(root_module, cls):
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer(ns3::Config::MatchContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Config::MatchContainer const &', 'arg0')])
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer() [constructor]
cls.add_constructor([])
## config.h (module 'core'): ns3::Config::MatchContainer::MatchContainer(std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > const & objects, std::vector<std::string, std::allocator<std::string> > const & contexts, std::string path) [constructor]
cls.add_constructor([param('std::vector< ns3::Ptr< ns3::Object > > const &', 'objects'), param('std::vector< std::string > const &', 'contexts'), param('std::string', 'path')])
## config.h (module 'core'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Object>*,std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > > ns3::Config::MatchContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Object > const, std::vector< ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## config.h (module 'core'): void ns3::Config::MatchContainer::Connect(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('Connect',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::ConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('ConnectWithoutContext',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::Disconnect(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('Disconnect',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): void ns3::Config::MatchContainer::DisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('DisconnectWithoutContext',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Object>*,std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > > ns3::Config::MatchContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Object > const, std::vector< ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## config.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Config::MatchContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')],
is_const=True)
## config.h (module 'core'): std::string ns3::Config::MatchContainer::GetMatchedPath(uint32_t i) const [member function]
cls.add_method('GetMatchedPath',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## config.h (module 'core'): uint32_t ns3::Config::MatchContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## config.h (module 'core'): std::string ns3::Config::MatchContainer::GetPath() const [member function]
cls.add_method('GetPath',
'std::string',
[],
is_const=True)
## config.h (module 'core'): void ns3::Config::MatchContainer::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
## nstime.h (module 'core'): ns3::Time ns3::Abs(ns3::Time const & time) [free function]
module.add_function('Abs',
'ns3::Time',
[param('ns3::Time const &', 'time')])
## int64x64.h (module 'core'): ns3::int64x64_t ns3::Abs(ns3::int64x64_t const & value) [free function]
module.add_function('Abs',
'ns3::int64x64_t',
[param('ns3::int64x64_t const &', 'value')])
## breakpoint.h (module 'core'): extern void ns3::BreakpointFallback() [free function]
module.add_function('BreakpointFallback',
'void',
[])
## vector.h (module 'core'): extern double ns3::CalculateDistance(ns3::Vector2D const & a, ns3::Vector2D const & b) [free function]
module.add_function('CalculateDistance',
'double',
[param('ns3::Vector2D const &', 'a'), param('ns3::Vector2D const &', 'b')])
## vector.h (module 'core'): extern double ns3::CalculateDistance(ns3::Vector3D const & a, ns3::Vector3D const & b) [free function]
module.add_function('CalculateDistance',
'double',
[param('ns3::Vector3D const &', 'a'), param('ns3::Vector3D const &', 'b')])
## ptr.h (module 'core'): extern ns3::Ptr<ns3::ObjectPtrContainerValue> ns3::Create() [free function]
module.add_function('Create',
'ns3::Ptr< ns3::ObjectPtrContainerValue >',
[],
template_parameters=['ns3::ObjectPtrContainerValue'])
## ptr.h (module 'core'): extern ns3::Ptr<ns3::PointerValue> ns3::Create() [free function]
module.add_function('Create',
'ns3::Ptr< ns3::PointerValue >',
[],
template_parameters=['ns3::PointerValue'])
## nstime.h (module 'core'): ns3::Time ns3::Days(ns3::int64x64_t value) [free function]
module.add_function('Days',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Days(double value) [free function]
module.add_function('Days',
'ns3::Time',
[param('double', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::FemtoSeconds(ns3::int64x64_t value) [free function]
module.add_function('FemtoSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::FemtoSeconds(uint64_t value) [free function]
module.add_function('FemtoSeconds',
'ns3::Time',
[param('uint64_t', 'value')])
## hash.h (module 'core'): uint32_t ns3::Hash32(std::string const s) [free function]
module.add_function('Hash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint32_t ns3::Hash32(char const * buffer, size_t const size) [free function]
module.add_function('Hash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hash64(std::string const s) [free function]
module.add_function('Hash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hash64(char const * buffer, size_t const size) [free function]
module.add_function('Hash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## nstime.h (module 'core'): ns3::Time ns3::Hours(ns3::int64x64_t value) [free function]
module.add_function('Hours',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Hours(double value) [free function]
module.add_function('Hours',
'ns3::Time',
[param('double', 'value')])
## log.h (module 'core'): extern void ns3::LogComponentDisable(char const * name, ns3::LogLevel level) [free function]
module.add_function('LogComponentDisable',
'void',
[param('char const *', 'name'), param('ns3::LogLevel', 'level')])
## log.h (module 'core'): extern void ns3::LogComponentDisableAll(ns3::LogLevel level) [free function]
module.add_function('LogComponentDisableAll',
'void',
[param('ns3::LogLevel', 'level')])
## log.h (module 'core'): extern void ns3::LogComponentEnable(char const * name, ns3::LogLevel level) [free function]
module.add_function('LogComponentEnable',
'void',
[param('char const *', 'name'), param('ns3::LogLevel', 'level')])
## log.h (module 'core'): extern void ns3::LogComponentEnableAll(ns3::LogLevel level) [free function]
module.add_function('LogComponentEnableAll',
'void',
[param('ns3::LogLevel', 'level')])
## log.h (module 'core'): extern void ns3::LogComponentPrintList() [free function]
module.add_function('LogComponentPrintList',
'void',
[])
## log.h (module 'core'): extern ns3::LogNodePrinter ns3::LogGetNodePrinter() [free function]
module.add_function('LogGetNodePrinter',
'ns3::LogNodePrinter',
[])
## log.h (module 'core'): extern ns3::LogTimePrinter ns3::LogGetTimePrinter() [free function]
module.add_function('LogGetTimePrinter',
'ns3::LogTimePrinter',
[])
## log.h (module 'core'): extern void ns3::LogSetNodePrinter(ns3::LogNodePrinter np) [free function]
module.add_function('LogSetNodePrinter',
'void',
[param('ns3::LogNodePrinter', 'np')])
## log.h (module 'core'): extern void ns3::LogSetTimePrinter(ns3::LogTimePrinter lp) [free function]
module.add_function('LogSetTimePrinter',
'void',
[param('ns3::LogTimePrinter', 'lp')])
## boolean.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeBooleanChecker() [free function]
module.add_function('MakeBooleanChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## callback.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeCallbackChecker() [free function]
module.add_function('MakeCallbackChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeAccessor const> ns3::MakeEmptyAttributeAccessor() [free function]
module.add_function('MakeEmptyAttributeAccessor',
'ns3::Ptr< ns3::AttributeAccessor const >',
[])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeChecker> ns3::MakeEmptyAttributeChecker() [free function]
module.add_function('MakeEmptyAttributeChecker',
'ns3::Ptr< ns3::AttributeChecker >',
[])
## trace-source-accessor.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::MakeEmptyTraceSourceAccessor() [free function]
module.add_function('MakeEmptyTraceSourceAccessor',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[])
## enum.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeEnumChecker(int v1, std::string n1, int v2=0, std::string n2="", int v3=0, std::string n3="", int v4=0, std::string n4="", int v5=0, std::string n5="", int v6=0, std::string n6="", int v7=0, std::string n7="", int v8=0, std::string n8="", int v9=0, std::string n9="", int v10=0, std::string n10="", int v11=0, std::string n11="", int v12=0, std::string n12="", int v13=0, std::string n13="", int v14=0, std::string n14="", int v15=0, std::string n15="", int v16=0, std::string n16="", int v17=0, std::string n17="", int v18=0, std::string n18="", int v19=0, std::string n19="", int v20=0, std::string n20="", int v21=0, std::string n21="", int v22=0, std::string n22="") [free function]
module.add_function('MakeEnumChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('int', 'v1'), param('std::string', 'n1'), param('int', 'v2', default_value='0'), param('std::string', 'n2', default_value='""'), param('int', 'v3', default_value='0'), param('std::string', 'n3', default_value='""'), param('int', 'v4', default_value='0'), param('std::string', 'n4', default_value='""'), param('int', 'v5', default_value='0'), param('std::string', 'n5', default_value='""'), param('int', 'v6', default_value='0'), param('std::string', 'n6', default_value='""'), param('int', 'v7', default_value='0'), param('std::string', 'n7', default_value='""'), param('int', 'v8', default_value='0'), param('std::string', 'n8', default_value='""'), param('int', 'v9', default_value='0'), param('std::string', 'n9', default_value='""'), param('int', 'v10', default_value='0'), param('std::string', 'n10', default_value='""'), param('int', 'v11', default_value='0'), param('std::string', 'n11', default_value='""'), param('int', 'v12', default_value='0'), param('std::string', 'n12', default_value='""'), param('int', 'v13', default_value='0'), param('std::string', 'n13', default_value='""'), param('int', 'v14', default_value='0'), param('std::string', 'n14', default_value='""'), param('int', 'v15', default_value='0'), param('std::string', 'n15', default_value='""'), param('int', 'v16', default_value='0'), param('std::string', 'n16', default_value='""'), param('int', 'v17', default_value='0'), param('std::string', 'n17', default_value='""'), param('int', 'v18', default_value='0'), param('std::string', 'n18', default_value='""'), param('int', 'v19', default_value='0'), param('std::string', 'n19', default_value='""'), param('int', 'v20', default_value='0'), param('std::string', 'n20', default_value='""'), param('int', 'v21', default_value='0'), param('std::string', 'n21', default_value='""'), param('int', 'v22', default_value='0'), param('std::string', 'n22', default_value='""')])
## make-event.h (module 'core'): extern ns3::EventImpl * ns3::MakeEvent(void (*)( ) * f) [free function]
module.add_function('MakeEvent',
'ns3::EventImpl *',
[param('void ( * ) ( ) *', 'f')])
## object-factory.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeObjectFactoryChecker() [free function]
module.add_function('MakeObjectFactoryChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## string.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeStringChecker() [free function]
module.add_function('MakeStringChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeChecker const> ns3::MakeTimeChecker() [free function]
module.add_function('MakeTimeChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeChecker const> ns3::MakeTimeChecker(ns3::Time const min) [free function]
module.add_function('MakeTimeChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('ns3::Time const', 'min')])
## nstime.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeTimeChecker(ns3::Time const min, ns3::Time const max) [free function]
module.add_function('MakeTimeChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('ns3::Time const', 'min'), param('ns3::Time const', 'max')])
## type-id.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeTypeIdChecker() [free function]
module.add_function('MakeTypeIdChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeVector2DChecker() [free function]
module.add_function('MakeVector2DChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeVector3DChecker() [free function]
module.add_function('MakeVector3DChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeVectorChecker() [free function]
module.add_function('MakeVectorChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## nstime.h (module 'core'): ns3::Time ns3::Max(ns3::Time const & ta, ns3::Time const & tb) [free function]
module.add_function('Max',
'ns3::Time',
[param('ns3::Time const &', 'ta'), param('ns3::Time const &', 'tb')])
## int64x64.h (module 'core'): ns3::int64x64_t ns3::Max(ns3::int64x64_t const & a, ns3::int64x64_t const & b) [free function]
module.add_function('Max',
'ns3::int64x64_t',
[param('ns3::int64x64_t const &', 'a'), param('ns3::int64x64_t const &', 'b')])
## nstime.h (module 'core'): ns3::Time ns3::MicroSeconds(ns3::int64x64_t value) [free function]
module.add_function('MicroSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::MicroSeconds(uint64_t value) [free function]
module.add_function('MicroSeconds',
'ns3::Time',
[param('uint64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::MilliSeconds(ns3::int64x64_t value) [free function]
module.add_function('MilliSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::MilliSeconds(uint64_t value) [free function]
module.add_function('MilliSeconds',
'ns3::Time',
[param('uint64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Min(ns3::Time const & ta, ns3::Time const & tb) [free function]
module.add_function('Min',
'ns3::Time',
[param('ns3::Time const &', 'ta'), param('ns3::Time const &', 'tb')])
## int64x64.h (module 'core'): ns3::int64x64_t ns3::Min(ns3::int64x64_t const & a, ns3::int64x64_t const & b) [free function]
module.add_function('Min',
'ns3::int64x64_t',
[param('ns3::int64x64_t const &', 'a'), param('ns3::int64x64_t const &', 'b')])
## nstime.h (module 'core'): ns3::Time ns3::Minutes(ns3::int64x64_t value) [free function]
module.add_function('Minutes',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Minutes(double value) [free function]
module.add_function('Minutes',
'ns3::Time',
[param('double', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::NanoSeconds(ns3::int64x64_t value) [free function]
module.add_function('NanoSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::NanoSeconds(uint64_t value) [free function]
module.add_function('NanoSeconds',
'ns3::Time',
[param('uint64_t', 'value')])
## simulator.h (module 'core'): extern ns3::Time ns3::Now() [free function]
module.add_function('Now',
'ns3::Time',
[])
## nstime.h (module 'core'): ns3::Time ns3::PicoSeconds(ns3::int64x64_t value) [free function]
module.add_function('PicoSeconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::PicoSeconds(uint64_t value) [free function]
module.add_function('PicoSeconds',
'ns3::Time',
[param('uint64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Seconds(ns3::int64x64_t value) [free function]
module.add_function('Seconds',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Seconds(double value) [free function]
module.add_function('Seconds',
'ns3::Time',
[param('double', 'value')])
## test.h (module 'core'): extern bool ns3::TestDoubleIsEqual(double const a, double const b, double const epsilon=std::numeric_limits<double>::epsilon()) [free function]
module.add_function('TestDoubleIsEqual',
'bool',
[param('double const', 'a'), param('double const', 'b'), param('double const', 'epsilon', default_value='std::numeric_limits<double>::epsilon()')])
## nstime.h (module 'core'): ns3::Time ns3::TimeStep(uint64_t ts) [free function]
module.add_function('TimeStep',
'ns3::Time',
[param('uint64_t', 'ts')])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['double'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['float'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned long long'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned int'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned short'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned char'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['long'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['int'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['short'])
## type-name.h (module 'core'): extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['signed char'])
## nstime.h (module 'core'): ns3::Time ns3::Years(ns3::int64x64_t value) [free function]
module.add_function('Years',
'ns3::Time',
[param('ns3::int64x64_t', 'value')])
## nstime.h (module 'core'): ns3::Time ns3::Years(double value) [free function]
module.add_function('Years',
'ns3::Time',
[param('double', 'value')])
register_functions_ns3_CommandLineHelper(module.get_submodule('CommandLineHelper'), root_module)
register_functions_ns3_Config(module.get_submodule('Config'), root_module)
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
register_functions_ns3_SystemPath(module.get_submodule('SystemPath'), root_module)
register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module)
register_functions_ns3_internal(module.get_submodule('internal'), root_module)
return
def register_functions_ns3_CommandLineHelper(module, root_module):
## command-line.h (module 'core'): extern std::string ns3::CommandLineHelper::GetDefault(bool const & val) [free function]
module.add_function('GetDefault',
'std::string',
[param('bool const &', 'val')],
template_parameters=['bool'])
## command-line.h (module 'core'): extern bool ns3::CommandLineHelper::UserItemParse(std::string const value, bool & val) [free function]
module.add_function('UserItemParse',
'bool',
[param('std::string const', 'value'), param('bool &', 'val')],
template_parameters=['bool'])
return
def register_functions_ns3_Config(module, root_module):
## config.h (module 'core'): extern void ns3::Config::Connect(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('Connect',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): extern void ns3::Config::ConnectWithoutContext(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('ConnectWithoutContext',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): extern void ns3::Config::Disconnect(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('Disconnect',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): extern void ns3::Config::DisconnectWithoutContext(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('DisconnectWithoutContext',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h (module 'core'): extern ns3::Ptr<ns3::Object> ns3::Config::GetRootNamespaceObject(uint32_t i) [free function]
module.add_function('GetRootNamespaceObject',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')])
## config.h (module 'core'): extern uint32_t ns3::Config::GetRootNamespaceObjectN() [free function]
module.add_function('GetRootNamespaceObjectN',
'uint32_t',
[])
## config.h (module 'core'): extern ns3::Config::MatchContainer ns3::Config::LookupMatches(std::string path) [free function]
module.add_function('LookupMatches',
'ns3::Config::MatchContainer',
[param('std::string', 'path')])
## config.h (module 'core'): extern void ns3::Config::RegisterRootNamespaceObject(ns3::Ptr<ns3::Object> obj) [free function]
module.add_function('RegisterRootNamespaceObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'obj')])
## config.h (module 'core'): extern void ns3::Config::Reset() [free function]
module.add_function('Reset',
'void',
[])
## config.h (module 'core'): extern void ns3::Config::Set(std::string path, ns3::AttributeValue const & value) [free function]
module.add_function('Set',
'void',
[param('std::string', 'path'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): extern void ns3::Config::SetDefault(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetDefault',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): extern bool ns3::Config::SetDefaultFailSafe(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetDefaultFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): extern void ns3::Config::SetGlobal(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetGlobal',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): extern bool ns3::Config::SetGlobalFailSafe(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetGlobalFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h (module 'core'): extern void ns3::Config::UnregisterRootNamespaceObject(ns3::Ptr<ns3::Object> obj) [free function]
module.add_function('UnregisterRootNamespaceObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'obj')])
return
def register_functions_ns3_FatalImpl(module, root_module):
## fatal-impl.h (module 'core'): extern void ns3::FatalImpl::FlushStreams() [free function]
module.add_function('FlushStreams',
'void',
[])
## fatal-impl.h (module 'core'): extern void ns3::FatalImpl::RegisterStream(std::ostream * stream) [free function]
module.add_function('RegisterStream',
'void',
[param('std::ostream *', 'stream')])
## fatal-impl.h (module 'core'): extern void ns3::FatalImpl::UnregisterStream(std::ostream * stream) [free function]
module.add_function('UnregisterStream',
'void',
[param('std::ostream *', 'stream')])
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_SystemPath(module, root_module):
## system-path.h (module 'core'): extern std::string ns3::SystemPath::Append(std::string left, std::string right) [free function]
module.add_function('Append',
'std::string',
[param('std::string', 'left'), param('std::string', 'right')])
## system-path.h (module 'core'): extern std::string ns3::SystemPath::FindSelfDirectory() [free function]
module.add_function('FindSelfDirectory',
'std::string',
[])
## system-path.h (module 'core'): extern std::string ns3::SystemPath::Join(std::_List_const_iterator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > begin, std::_List_const_iterator<std::basic_string<char, std::char_traits<char>, std::allocator<char> > > end) [free function]
module.add_function('Join',
'std::string',
[param('std::_List_const_iterator< std::basic_string< char, std::char_traits< char >, std::allocator< char > > >', 'begin'), param('std::_List_const_iterator< std::basic_string< char, std::char_traits< char >, std::allocator< char > > >', 'end')])
## system-path.h (module 'core'): extern void ns3::SystemPath::MakeDirectories(std::string path) [free function]
module.add_function('MakeDirectories',
'void',
[param('std::string', 'path')])
## system-path.h (module 'core'): extern std::string ns3::SystemPath::MakeTemporaryDirectoryName() [free function]
module.add_function('MakeTemporaryDirectoryName',
'std::string',
[])
## system-path.h (module 'core'): extern std::list<std::string, std::allocator<std::string> > ns3::SystemPath::ReadFiles(std::string path) [free function]
module.add_function('ReadFiles',
'std::list< std::string >',
[param('std::string', 'path')])
## system-path.h (module 'core'): extern std::list<std::string, std::allocator<std::string> > ns3::SystemPath::Split(std::string path) [free function]
module.add_function('Split',
'std::list< std::string >',
[param('std::string', 'path')])
return
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
## double.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::internal::MakeDoubleChecker(double min, double max, std::string name) [free function]
module.add_function('MakeDoubleChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('double', 'min'), param('double', 'max'), param('std::string', 'name')])
## integer.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::internal::MakeIntegerChecker(int64_t min, int64_t max, std::string name) [free function]
module.add_function('MakeIntegerChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('int64_t', 'min'), param('int64_t', 'max'), param('std::string', 'name')])
## uinteger.h (module 'core'): extern ns3::Ptr<ns3::AttributeChecker const> ns3::internal::MakeUintegerChecker(uint64_t min, uint64_t max, std::string name) [free function]
module.add_function('MakeUintegerChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('uint64_t', 'min'), param('uint64_t', 'max'), param('std::string', 'name')])
return
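# Entry point below: running this generated module as a script makes pybindgen
# emit the C++ binding code for the ns-3 'core' module on stdout
# (FileCodeSink wraps sys.stdout).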
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
| gpl-2.0 |
projectatomic/atomic-reactor | atomic_reactor/utils/odcs.py | 1 | 7754 | """
Copyright (c) 2017, 2019 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import
from atomic_reactor.util import get_retrying_requests_session
from textwrap import dedent
import json
import logging
import time
logger = logging.getLogger(__name__)
MULTILIB_METHOD_DEFAULT = ['devel', 'runtime']
class ODCSClient(object):
OIDC_TOKEN_HEADER = 'Authorization'
OIDC_TOKEN_TYPE = 'Bearer'
def __init__(self, url, insecure=False, token=None, cert=None, timeout=None):
if url.endswith('/'):
self.url = url
else:
self.url = url + '/'
self.timeout = 3600 if timeout is None else timeout
self._setup_session(insecure=insecure, token=token, cert=cert)
def _setup_session(self, insecure, token, cert):
# method_whitelist=False allows retrying non-idempotent methods like POST
session = get_retrying_requests_session(method_whitelist=False)
session.verify = not insecure
if token:
session.headers[self.OIDC_TOKEN_HEADER] = '%s %s' % (self.OIDC_TOKEN_TYPE, token)
if cert:
session.cert = cert
self.session = session
def start_compose(self, source_type, source, packages=None, sigkeys=None, arches=None,
flags=None, multilib_arches=None, multilib_method=None,
modular_koji_tags=None):
"""Start a new ODCS compose
:param source_type: str, the type of compose to request (tag, module, pulp)
:param source: str, if source_type "tag" is used, the name of the Koji tag
to use when retrieving packages to include in compose;
if source_type "module", white-space separated NAME-STREAM or
NAME-STREAM-VERSION list of modules to include in compose;
if source_type "pulp", white-space separated list of context-sets
to include in compose
:param packages: list<str>, packages which should be included in a compose. Only
relevant when source_type "tag" is used.
:param sigkeys: list<str>, IDs of signature keys. Only packages signed by one of
these keys will be included in a compose.
        :param arches: list<str>, list of additional Koji arches to build this compose for.
                       By default, the compose is built only for the "x86_64" arch.
        :param flags: list<str>, ODCS compose flags to enable; forwarded verbatim
                      to ODCS in the request body.
        :param multilib_arches: list<str>, list of Koji arches to build as multilib in this
                                compose. By default, no arches are built as multilib.
        :param multilib_method: list<str>, list of methods to determine which packages should
                                be included in a multilib compose. Defaults to None, but the value
                                of ['devel', 'runtime'] will be passed to ODCS if multilib_arches is
                                not empty and no multilib_method value is provided.
:param modular_koji_tags: list<str>, the koji tags which are tagged to builds from the
modular Koji Content Generator. Builds with matching tags will be
included in the compose.
:return: dict, status of compose being created by request.
"""
body = {
'source': {
'type': source_type,
'source': source
}
}
if source_type == "tag" and not modular_koji_tags:
body['source']['packages'] = packages or []
if sigkeys is not None:
body['source']['sigkeys'] = sigkeys
if flags is not None:
body['flags'] = flags
if arches is not None:
body['arches'] = arches
if multilib_arches:
body['multilib_arches'] = multilib_arches
body['multilib_method'] = multilib_method or MULTILIB_METHOD_DEFAULT
if modular_koji_tags:
body['source']['modular_koji_tags'] = modular_koji_tags
logger.info("Starting compose: %s", body)
response = self.session.post('{}composes/'.format(self.url),
json=body)
response.raise_for_status()
return response.json()
def renew_compose(self, compose_id, sigkeys=None):
"""Renew, or extend, existing compose
If the compose has already been removed, ODCS creates a new compose.
Otherwise, it extends the time_to_expire of existing compose. In most
cases, caller should assume the compose ID will change.
:param compose_id: int, compose ID to renew
:param sigkeys: list, new signing intent keys to regenerate compose with
:return: dict, status of compose being renewed.
"""
params = {}
if sigkeys is not None:
params['sigkeys'] = sigkeys
logger.info("Renewing compose %d", compose_id)
response = self.session.patch('{}composes/{}'.format(self.url, compose_id),
json=params)
response.raise_for_status()
response_json = response.json()
compose_id = response_json['id']
logger.info("Renewed compose is %d", compose_id)
return response_json
def wait_for_compose(self, compose_id,
burst_retry=1,
burst_length=30,
slow_retry=10):
"""Wait for compose request to finalize
:param compose_id: int, compose ID to wait for
:param burst_retry: int, seconds to wait between retries prior to exceeding
the burst length
:param burst_length: int, seconds to switch to slower retry period
:param slow_retry: int, seconds to wait between retries after exceeding
the burst length
:return: dict, updated status of compose.
        :raise RuntimeError: if the compose reaches the 'failed' state, or if
                             waiting exceeds the client timeout
"""
logger.debug("Getting compose information for information for compose_id=%s",
compose_id)
url = '{}composes/{}'.format(self.url, compose_id)
start_time = time.time()
while True:
response = self.session.get(url)
response.raise_for_status()
response_json = response.json()
if response_json['state_name'] == 'failed':
state_reason = response_json.get('state_reason', 'Unknown')
logger.error(dedent("""\
Compose %s failed: %s
Details: %s
"""), compose_id, state_reason, json.dumps(response_json, indent=4))
raise RuntimeError('Failed request for compose_id={}: {}'
.format(compose_id, state_reason))
if response_json['state_name'] not in ['wait', 'generating']:
logger.debug("Retrieved compose information for compose_id=%s: %s",
compose_id, json.dumps(response_json, indent=4))
return response_json
elapsed = time.time() - start_time
if elapsed > self.timeout:
raise RuntimeError("Retrieving %s timed out after %s seconds" %
(url, self.timeout))
else:
logger.debug("Retrying request compose_id=%s, elapsed_time=%s",
compose_id, elapsed)
if elapsed > burst_length:
time.sleep(slow_retry)
else:
time.sleep(burst_retry)
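# ---------------------------------------------------------------------------
# Illustrative usage sketch -- not part of atomic-reactor. It shows how a
# caller might drive ODCSClient end to end; the URL, token, tag and package
# names here are hypothetical, and the 'result_repofile' field follows the
# ODCS compose API.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    client = ODCSClient('https://odcs.example.com/api/1/',
                        token='example-oidc-token', timeout=600)
    # request a tag-based compose limited to two packages, then block until
    # ODCS reports a terminal state
    compose = client.start_compose(source_type='tag',
                                   source='example-build-tag',
                                   packages=['bash', 'coreutils'])
    compose = client.wait_for_compose(compose['id'])
    print('repofile: %s' % compose.get('result_repofile'))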
| bsd-3-clause |
potash/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
    # p-th root of summed p-th powers; used as a user-defined callable
    # metric (with p=2, i.e. euclidean) in the pickle test below
    return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    # Reference result: compute the full pairwise distance matrix, then take
    # the k smallest entries per query row.
    D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    ind = np.argsort(D, axis=1)[:, :k]
    dist = D[np.arange(Y.shape[0])[:, None], ind]  # gather sorted distances row by row
    return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
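# ---------------------------------------------------------------------------
# Standalone sketch (not one of the nose test generators above): minimal
# BallTree usage for orientation when reading the tests. Values are
# illustrative only.
def example_ball_tree_usage():
    rng_ex = np.random.RandomState(0)
    X = rng_ex.random_sample((100, 3))
    tree = BallTree(X, leaf_size=2)
    # distances and indices of the 3 nearest neighbours of the first point
    dist, ind = tree.query(X[:1], k=3)
    # number of points within radius 0.3 of the first point
    count = tree.query_radius(X[:1], r=0.3, count_only=True)
    return dist, ind, count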
| bsd-3-clause |
manojgudi/sandhi | modules/gr36/gnuradio-core/src/python/gnuradio/gr/qa_pipe_fittings.py | 18 | 4143 | #!/usr/bin/env python
#
# Copyright 2005,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
if 0:
    # debugging aid: pause at startup so a debugger can attach to this PID
    import os
    print "pid =", os.getpid()
    raw_input("Attach, then press Enter to continue")
def calc_expected_result(src_data, n):
assert (len(src_data) % n) == 0
result = [list() for x in range(n)]
#print "len(result) =", len(result)
for i in xrange(len(src_data)):
(result[i % n]).append(src_data[i])
return [tuple(x) for x in result]
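# Worked example: calc_expected_result(range(6), 2) == [(0, 2, 4), (1, 3, 5)];
# element i of the input ends up in output stream i % n.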
class test_pipe_fittings(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block ()
def tearDown(self):
self.tb = None
def test_001(self):
"""
Test stream_to_streams.
"""
n = 8
src_len = n * 8
src_data = range(src_len)
expected_results = calc_expected_result(src_data, n)
#print "expected results: ", expected_results
src = gr.vector_source_i(src_data)
op = gr.stream_to_streams(gr.sizeof_int, n)
self.tb.connect(src, op)
dsts = []
for i in range(n):
dst = gr.vector_sink_i()
self.tb.connect((op, i), (dst, 0))
dsts.append(dst)
self.tb.run()
for d in range(n):
self.assertEqual(expected_results[d], dsts[d].data())
def test_002(self):
"""
Test streams_to_stream (using stream_to_streams).
"""
n = 8
src_len = n * 8
src_data = tuple(range(src_len))
expected_results = src_data
src = gr.vector_source_i(src_data)
op1 = gr.stream_to_streams(gr.sizeof_int, n)
op2 = gr.streams_to_stream(gr.sizeof_int, n)
dst = gr.vector_sink_i()
self.tb.connect(src, op1)
for i in range(n):
self.tb.connect((op1, i), (op2, i))
self.tb.connect(op2, dst)
self.tb.run()
self.assertEqual(expected_results, dst.data())
def test_003(self):
"""
Test streams_to_vector (using stream_to_streams & vector_to_stream).
"""
n = 8
src_len = n * 8
src_data = tuple(range(src_len))
expected_results = src_data
src = gr.vector_source_i(src_data)
op1 = gr.stream_to_streams(gr.sizeof_int, n)
op2 = gr.streams_to_vector(gr.sizeof_int, n)
op3 = gr.vector_to_stream(gr.sizeof_int, n)
dst = gr.vector_sink_i()
self.tb.connect(src, op1)
for i in range(n):
self.tb.connect((op1, i), (op2, i))
self.tb.connect(op2, op3, dst)
self.tb.run()
self.assertEqual(expected_results, dst.data())
def test_004(self):
"""
Test vector_to_streams.
"""
n = 8
src_len = n * 8
src_data = tuple(range(src_len))
expected_results = src_data
src = gr.vector_source_i(src_data)
op1 = gr.stream_to_vector(gr.sizeof_int, n)
op2 = gr.vector_to_streams(gr.sizeof_int, n)
op3 = gr.streams_to_stream(gr.sizeof_int, n)
dst = gr.vector_sink_i()
self.tb.connect(src, op1, op2)
for i in range(n):
self.tb.connect((op2, i), (op3, i))
self.tb.connect(op3, dst)
self.tb.run()
self.assertEqual(expected_results, dst.data())
if __name__ == '__main__':
gr_unittest.run(test_pipe_fittings, "test_pipe_fittings.xml")
| gpl-3.0 |
a7xtony1/plugin.video.ELECTROMERIDAtv | modules/libraries/subtitles.py | 5 | 5236 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
import os
import zlib
import base64
import codecs
import xmlrpclib
import control
import xbmc
langDict = {
    'Afrikaans': 'afr', 'Albanian': 'alb', 'Arabic': 'ara', 'Armenian': 'arm',
    'Basque': 'baq', 'Bengali': 'ben', 'Bosnian': 'bos', 'Breton': 'bre',
    'Bulgarian': 'bul', 'Burmese': 'bur', 'Catalan': 'cat', 'Chinese': 'chi',
    'Croatian': 'hrv', 'Czech': 'cze', 'Danish': 'dan', 'Dutch': 'dut',
    'English': 'eng', 'Esperanto': 'epo', 'Estonian': 'est', 'Finnish': 'fin',
    'French': 'fre', 'Galician': 'glg', 'Georgian': 'geo', 'German': 'ger',
    'Greek': 'ell', 'Hebrew': 'heb', 'Hindi': 'hin', 'Hungarian': 'hun',
    'Icelandic': 'ice', 'Indonesian': 'ind', 'Italian': 'ita', 'Japanese': 'jpn',
    'Kazakh': 'kaz', 'Khmer': 'khm', 'Korean': 'kor', 'Latvian': 'lav',
    'Lithuanian': 'lit', 'Luxembourgish': 'ltz', 'Macedonian': 'mac',
    'Malay': 'may', 'Malayalam': 'mal', 'Manipuri': 'mni', 'Mongolian': 'mon',
    'Montenegrin': 'mne', 'Norwegian': 'nor', 'Occitan': 'oci', 'Persian': 'per',
    'Polish': 'pol', 'Portuguese': 'por,pob', 'Portuguese(Brazil)': 'pob,por',
    'Romanian': 'rum', 'Russian': 'rus', 'Serbian': 'scc', 'Sinhalese': 'sin',
    'Slovak': 'slo', 'Slovenian': 'slv', 'Spanish': 'spa', 'Swahili': 'swa',
    'Swedish': 'swe', 'Syriac': 'syr', 'Tagalog': 'tgl', 'Tamil': 'tam',
    'Telugu': 'tel', 'Thai': 'tha', 'Turkish': 'tur', 'Ukrainian': 'ukr',
    'Urdu': 'urd'}
codePageDict = {'ara': 'cp1256', 'ar': 'cp1256', 'ell': 'cp1253', 'el': 'cp1253', 'heb': 'cp1255', 'he': 'cp1255', 'tur': 'cp1254', 'tr': 'cp1254', 'rus': 'cp1251', 'ru': 'cp1251'}
quality = ['bluray', 'hdrip', 'brrip', 'bdrip', 'dvdrip', 'webrip', 'hdtv']
def get(name, imdb, season, episode):
try:
langs = []
try:
try: langs = langDict[control.setting('sublang1')].split(',')
except: langs.append(langDict[control.setting('sublang1')])
except: pass
try:
try: langs = langs + langDict[control.setting('sublang2')].split(',')
except: langs.append(langDict[control.setting('sublang2')])
except: pass
        try: subLang = xbmc.Player().getSubtitles()
        except: subLang = ''
        # already showing subtitles in the preferred language: nothing to do
        if subLang == langs[0]: raise Exception()
server = xmlrpclib.Server('http://api.opensubtitles.org/xml-rpc', verbose=0)
token = server.LogIn('', '', 'en', 'XBMC_Subtitles_v1')['token']
sublanguageid = ','.join(langs) ; imdbid = re.sub('[^0-9]', '', imdb)
if not (season == '' or episode == ''):
result = server.SearchSubtitles(token, [{'sublanguageid': sublanguageid, 'imdbid': imdbid, 'season': season, 'episode': episode}])['data']
fmt = ['hdtv']
else:
result = server.SearchSubtitles(token, [{'sublanguageid': sublanguageid, 'imdbid': imdbid}])['data']
try: vidPath = xbmc.Player().getPlayingFile()
except: vidPath = ''
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', vidPath)
fmt = [i.lower() for i in fmt]
fmt = [i for i in fmt if i in quality]
        # keep single-CD subs only, then rank candidates per language:
        # 1) release name matches the playing file's quality tags,
        # 2) release name matches any known quality tag,
        # 3) any subtitle in that language
        filter = []
        result = [i for i in result if i['SubSumCD'] == '1']
        for lang in langs:
            filter += [i for i in result if i['SubLanguageID'] == lang and any(x in i['MovieReleaseName'].lower() for x in fmt)]
            filter += [i for i in result if i['SubLanguageID'] == lang and any(x in i['MovieReleaseName'].lower() for x in quality)]
            filter += [i for i in result if i['SubLanguageID'] == lang]
try: lang = xbmc.convertLanguage(filter[0]['SubLanguageID'], xbmc.ISO_639_1)
except: lang = filter[0]['SubLanguageID']
content = [filter[0]['IDSubtitleFile'],]
content = server.DownloadSubtitles(token, content)
content = base64.b64decode(content['data'][0]['data'])
content = str(zlib.decompressobj(16+zlib.MAX_WBITS).decompress(content))
subtitle = xbmc.translatePath('special://temp/')
subtitle = os.path.join(subtitle, 'TemporarySubs.%s.srt' % lang)
codepage = codePageDict.get(lang, '')
if codepage and control.setting('autoconvert_utf8') == 'true':
try:
content_encoded = codecs.decode(content, codepage)
content = codecs.encode(content_encoded, 'utf-8')
except:
pass
file = control.openFile(subtitle, 'w')
file.write(str(content))
file.close()
xbmc.sleep(1000)
xbmc.Player().setSubtitles(subtitle)
except:
pass
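# Illustrative calls (hypothetical titles and IMDb ids), roughly how the
# add-on's playback service would invoke this module:
# get('The Movie (2015)', 'tt1234567', '', '')   # movie: no season/episode
# get('The Show', 'tt7654321', '1', '3')         # episode S01E03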
| gpl-2.0 |
blazewicz/micropython | tests/wipy/pin.py | 65 | 4862 | """
This test need a set of pins which can be set as inputs and have no external
pull up or pull down connected.
GP12 and GP17 must be connected together
"""
from machine import Pin
import os
mch = os.uname().machine
if 'LaunchPad' in mch:
pin_map = ['GP24', 'GP12', 'GP14', 'GP15', 'GP16', 'GP17', 'GP28', 'GP8', 'GP6', 'GP30', 'GP31', 'GP3', 'GP0', 'GP4', 'GP5']
max_af_idx = 15
elif 'WiPy' in mch:
pin_map = ['GP23', 'GP24', 'GP12', 'GP13', 'GP14', 'GP9', 'GP17', 'GP28', 'GP22', 'GP8', 'GP30', 'GP31', 'GP0', 'GP4', 'GP5']
max_af_idx = 15
else:
raise Exception('Board not supported!')
# test initial value
p = Pin('GP12', Pin.IN)
Pin('GP17', Pin.OUT, value=1)
print(p() == 1)
Pin('GP17', Pin.OUT, value=0)
print(p() == 0)
def test_noinit():
for p in pin_map:
pin = Pin(p)
pin.value()
def test_pin_read(pull):
# enable the pull resistor on all pins, then read the value
for p in pin_map:
pin = Pin(p, mode=Pin.IN, pull=pull)
for p in pin_map:
print(pin())
def test_pin_af():
for p in pin_map:
for af in Pin(p).alt_list():
if af[1] <= max_af_idx:
Pin(p, mode=Pin.ALT, alt=af[1])
Pin(p, mode=Pin.ALT_OPEN_DRAIN, alt=af[1])
# test un-initialized pins
test_noinit()
# test with pull-up and pull-down
test_pin_read(Pin.PULL_UP)
test_pin_read(Pin.PULL_DOWN)
# test all constructor combinations
pin = Pin(pin_map[0])
pin = Pin(pin_map[0], mode=Pin.IN)
pin = Pin(pin_map[0], mode=Pin.OUT)
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_DOWN)
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OPEN_DRAIN, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_DOWN)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=None)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.MED_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER)
pin = Pin(pin_map[0], mode=Pin.OUT, drive=pin.LOW_POWER)
pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_DOWN)
pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP)
pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP)
test_pin_af() # try the entire af range on all pins
# test pin init and printing
pin = Pin(pin_map[0])
pin.init(mode=Pin.IN)
print(pin)
pin.init(Pin.IN, Pin.PULL_DOWN)
print(pin)
pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.LOW_POWER)
print(pin)
pin.init(mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.HIGH_POWER)
print(pin)
# test value in OUT mode
pin = Pin(pin_map[0], mode=Pin.OUT)
pin.value(0)
pin.toggle() # test toggle
print(pin())
pin.toggle() # test toggle again
print(pin())
# test different value settings
pin(1)
print(pin.value())
pin(0)
print(pin.value())
pin.value(1)
print(pin())
pin.value(0)
print(pin())
# test all getters and setters
pin = Pin(pin_map[0], mode=Pin.OUT)
# mode
print(pin.mode() == Pin.OUT)
pin.mode(Pin.IN)
print(pin.mode() == Pin.IN)
# pull
pin.pull(None)
print(pin.pull() == None)
pin.pull(Pin.PULL_DOWN)
print(pin.pull() == Pin.PULL_DOWN)
# drive
pin.drive(Pin.MED_POWER)
print(pin.drive() == Pin.MED_POWER)
pin.drive(Pin.HIGH_POWER)
print(pin.drive() == Pin.HIGH_POWER)
# id
print(pin.id() == pin_map[0])
# all the next ones MUST raise
try:
pin = Pin(pin_map[0], mode=Pin.OUT, pull=Pin.PULL_UP, drive=pin.IN) # incorrect drive value
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], mode=Pin.LOW_POWER, pull=Pin.PULL_UP) # incorrect mode value
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], mode=Pin.IN, pull=Pin.HIGH_POWER) # incorrect pull value
except Exception:
print('Exception')
try:
pin = Pin('A0', Pin.OUT, Pin.PULL_DOWN) # incorrect pin id
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], Pin.IN, Pin.PULL_UP, alt=0) # af specified in GPIO mode
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], Pin.OUT, Pin.PULL_UP, alt=7) # af specified in GPIO mode
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], Pin.ALT, Pin.PULL_UP, alt=0) # incorrect af
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=-1) # incorrect af
except Exception:
print('Exception')
try:
pin = Pin(pin_map[0], Pin.ALT_OPEN_DRAIN, Pin.PULL_UP, alt=16) # incorrect af
except Exception:
print('Exception')
try:
pin.mode(Pin.PULL_UP) # incorrect pin mode
except Exception:
print('Exception')
try:
pin.pull(Pin.OUT) # incorrect pull
except Exception:
print('Exception')
try:
pin.drive(Pin.IN) # incorrect drive strength
except Exception:
print('Exception')
try:
pin.id('ABC') # id cannot be set
except Exception:
print('Exception')
| mit |
liyi193328/seq2seq | seq2seq/contrib/learn/datasets/synthetic_test.py | 110 | 5314 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import numpy as np
from tensorflow.python.platform import test
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.datasets import synthetic
class SyntheticTest(test.TestCase):
"""Test synthetic dataset generation"""
def test_make_dataset(self):
"""Test if the synthetic routine wrapper complains about the name"""
self.assertRaises(ValueError, datasets.make_dataset, name='_non_existing_name')
def test_all_datasets_callable(self):
"""Test if all methods inside the `SYNTHETIC` are callable"""
self.assertIsInstance(datasets.SYNTHETIC, dict)
if len(datasets.SYNTHETIC) > 0:
for name, method in six.iteritems(datasets.SYNTHETIC):
self.assertTrue(callable(method))
def test_circles(self):
"""Test if the circles are generated correctly
Tests:
- return type is `Dataset`
- returned `data` shape is (n_samples, n_features)
- returned `target` shape is (n_samples,)
- set of unique classes range is [0, n_classes)
TODO:
- all points have the same radius, if no `noise` specified
"""
n_samples = 100
n_classes = 2
circ = synthetic.circles(n_samples = n_samples, noise = None, n_classes = n_classes)
self.assertIsInstance(circ, datasets.base.Dataset)
self.assertTupleEqual(circ.data.shape, (n_samples,2))
self.assertTupleEqual(circ.target.shape, (n_samples,))
self.assertSetEqual(set(circ.target), set(range(n_classes)))
def test_circles_replicable(self):
"""Test if the data generation is replicable with a specified `seed`
Tests:
- return the same value if raised with the same seed
- return different values if noise or seed is different
"""
seed = 42
noise = 0.1
circ0 = synthetic.circles(n_samples = 100, noise = noise, n_classes = 2, seed = seed)
circ1 = synthetic.circles(n_samples = 100, noise = noise, n_classes = 2, seed = seed)
np.testing.assert_array_equal(circ0.data, circ1.data)
np.testing.assert_array_equal(circ0.target, circ1.target)
circ1 = synthetic.circles(n_samples = 100, noise = noise, n_classes = 2, seed = seed+1)
self.assertRaises(AssertionError, np.testing.assert_array_equal, circ0.data, circ1.data)
self.assertRaises(AssertionError, np.testing.assert_array_equal, circ0.target, circ1.target)
circ1 = synthetic.circles(n_samples = 100, noise = noise/2., n_classes = 2, seed = seed)
self.assertRaises(AssertionError, np.testing.assert_array_equal, circ0.data, circ1.data)
def test_spirals(self):
"""Test if the circles are generated correctly
Tests:
- if mode is unknown, ValueError is raised
- return type is `Dataset`
- returned `data` shape is (n_samples, n_features)
- returned `target` shape is (n_samples,)
- set of unique classes range is [0, n_classes)
"""
self.assertRaises(ValueError, synthetic.spirals, mode='_unknown_mode_spiral_')
n_samples = 100
modes = ('archimedes', 'bernoulli', 'fermat')
for mode in modes:
spir = synthetic.spirals(n_samples = n_samples, noise = None, mode = mode)
self.assertIsInstance(spir, datasets.base.Dataset)
self.assertTupleEqual(spir.data.shape, (n_samples,2))
self.assertTupleEqual(spir.target.shape, (n_samples,))
self.assertSetEqual(set(spir.target), set(range(2)))
def test_spirals_replicable(self):
"""Test if the data generation is replicable with a specified `seed`
Tests:
- return the same value if raised with the same seed
- return different values if noise or seed is different
"""
seed = 42
noise = 0.1
modes = ('archimedes', 'bernoulli', 'fermat')
for mode in modes:
      spir0 = synthetic.spirals(n_samples = 1000, noise = noise, seed = seed, mode = mode)
      spir1 = synthetic.spirals(n_samples = 1000, noise = noise, seed = seed, mode = mode)
      np.testing.assert_array_equal(spir0.data, spir1.data)
      np.testing.assert_array_equal(spir0.target, spir1.target)
      spir1 = synthetic.spirals(n_samples = 1000, noise = noise, seed = seed+1, mode = mode)
      self.assertRaises(AssertionError, np.testing.assert_array_equal, spir0.data, spir1.data)
      self.assertRaises(AssertionError, np.testing.assert_array_equal, spir0.target, spir1.target)
      spir1 = synthetic.spirals(n_samples = 1000, noise = noise/2., seed = seed, mode = mode)
self.assertRaises(AssertionError, np.testing.assert_array_equal, spir0.data, spir1.data)
if __name__ == "__main__":
test.main()
| apache-2.0 |
Daniel-CA/commission | sale_commission/wizard/wizard_invoice.py | 1 | 2593 | # -*- coding: utf-8 -*-
# © 2011 Pexego Sistemas Informáticos (<http://www.pexego.es>)
# © 2015 Pedro M. Baeza (<http://www.serviciosbaeza.com>)
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api, _
class SaleCommissionMakeInvoice(models.TransientModel):
_name = 'sale.commission.make.invoice'
def _default_journal(self):
return self.env['account.journal'].search(
[('type', '=', 'purchase')])[:1]
def _default_refund_journal(self):
return self.env['account.journal'].search(
[('type', '=', 'purchase_refund')])[:1]
def _default_settlements(self):
return self.env.context.get('settlement_ids', [])
def _default_from_settlement(self):
return bool(self.env.context.get('settlement_ids'))
journal = fields.Many2one(
comodel_name='account.journal', required=True,
domain="[('type', '=', 'purchase')]",
default=_default_journal)
refund_journal = fields.Many2one(
string='Refund Journal',
comodel_name='account.journal', required=True,
domain="[('type', '=', 'purchase_refund')]",
default=_default_refund_journal)
product = fields.Many2one(
string='Product for invoicing',
comodel_name='product.product', required=True)
settlements = fields.Many2many(
comodel_name='sale.commission.settlement',
relation="sale_commission_make_invoice_settlement_rel",
column1='wizard_id', column2='settlement_id',
domain="[('state', '=', 'settled')]",
default=_default_settlements)
from_settlement = fields.Boolean(default=_default_from_settlement)
date = fields.Date()
@api.multi
def button_create(self):
self.ensure_one()
if not self.settlements:
self.settlements = self.env['sale.commission.settlement'].search(
[('state', '=', 'settled'), ('agent_type', '=', 'agent')])
self.settlements.make_invoices(
self.journal, self.refund_journal, self.product, date=self.date)
# go to results
if len(self.settlements):
return {
'name': _('Created Invoices'),
'type': 'ir.actions.act_window',
'views': [[False, 'list'], [False, 'form']],
'res_model': 'account.invoice',
'domain': [
['id', 'in', [x.invoice.id for x in self.settlements]],
],
}
else:
return {'type': 'ir.actions.act_window_close'}
| agpl-3.0 |
QGuLL/samba | python/samba/tests/kcc/__init__.py | 22 | 3381 | # Unix SMB/CIFS implementation. Tests for samba.kcc core.
# Copyright (C) Andrew Bartlett 2015
#
# Written by Douglas Bagnall <douglas.bagnall@catalyst.net.nz>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.kcc"""
import samba
import os
import time
from tempfile import mkdtemp
import samba.tests
from samba import kcc
from samba import ldb
from samba.dcerpc import misc
from samba.param import LoadParm
from samba.credentials import Credentials
from samba.samdb import SamDB
unix_now = int(time.time())
unix_once_upon_a_time = 1000000000  # 2001-09-09
ENV_DSAS = {
'ad_dc_ntvfs' : ['CN=LOCALDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com'],
'fl2000dc': ['CN=DC5,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba2000,DC=example,DC=com'],
'fl2003dc': ['CN=DC6,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba2003,DC=example,DC=com'],
'fl2008r2dc': ['CN=DC7,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba2008r2,DC=example,DC=com'],
'promoted_dc': ['CN=PROMOTEDVDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com',
'CN=LOCALDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com'],
'vampire_dc': ['CN=LOCALDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com',
'CN=LOCALVAMPIREDC,CN=Servers,CN=Default-First-Site-Name,CN=Sites,CN=Configuration,DC=samba,DC=example,DC=com'],
}
class KCCTests(samba.tests.TestCase):
def setUp(self):
super(KCCTests, self).setUp()
self.lp = LoadParm()
self.creds = Credentials()
self.creds.guess(self.lp)
self.creds.set_username(os.environ["USERNAME"])
self.creds.set_password(os.environ["PASSWORD"])
def test_list_dsas(self):
my_kcc = kcc.KCC(unix_now, False, False, False, False)
my_kcc.load_samdb("ldap://%s" % os.environ["SERVER"],
self.lp, self.creds)
dsas = my_kcc.list_dsas()
env = os.environ['TEST_ENV']
for expected_dsa in ENV_DSAS[env]:
self.assertIn(expected_dsa, dsas)
def test_verify(self):
"""check that the KCC generates graphs that pass its own verify
option. This is not a spectacular acheivement when there are
only a couple of nodes to connect, but it shows something.
"""
my_kcc = kcc.KCC(unix_now, readonly=True, verify=True,
debug=False, dot_file_dir=None)
my_kcc.run("ldap://%s" % os.environ["SERVER"],
self.lp, self.creds,
attempt_live_connections=False)
| gpl-3.0 |
VentureCranial/system-status-dashboard | ssd/urls.py | 1 | 5628 | #
# Copyright 2013 - Tom Alessi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Main Dashboard
url(r'^$', 'ssd.dashboard.views.main.index'),
# Escalation Path
url(r'^escalation$', 'ssd.dashboard.views.escalation.escalation'),
# Search
url(r'^search/events$', 'ssd.dashboard.views.search.events'),
url(r'^search/graph$', 'ssd.dashboard.views.search.graph'),
# Preferences
url(r'^prefs/set_timezone$', 'ssd.dashboard.views.prefs.set_timezone'),
url(r'^prefs/jump$', 'ssd.dashboard.views.prefs.jump'),
# Incident Events
url(r'^i_detail$', 'ssd.dashboard.views.incidents.i_detail'),
# Maintenance Events
url(r'^m_detail$', 'ssd.dashboard.views.maintenance.m_detail'),
# Incident Reports
url(r'^ireport$', 'ssd.dashboard.views.ireport.ireport'),
# -- from here down, it's all admin functionality -- #
# User login
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
# User logout
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout',{'next_page': '/'}),
# Standard Django admin site
url(r'^djadmin/', include(admin.site.urls)),
# SSD Admin
url(r'^admin$', 'ssd.dashboard.views.admin.main'),
url(r'^admin/admin_config$', 'ssd.dashboard.views.admin.admin_config'),
url(r'^admin/cache_status$', 'ssd.dashboard.views.admin.cache_status'),
# Incident Events (admin functionality)
url(r'^admin/incident$', 'ssd.dashboard.views.incidents.incident'),
url(r'^admin/i_delete$', 'ssd.dashboard.views.incidents.i_delete'),
url(r'^admin/i_list$', 'ssd.dashboard.views.incidents.i_list'),
url(r'^admin/i_update$', 'ssd.dashboard.views.incidents.i_update'),
url(r'^admin/i_update_delete$', 'ssd.dashboard.views.incidents.i_update_delete'),
# Maintenance Events (admin functionality)
url(r'^admin/maintenance$', 'ssd.dashboard.views.maintenance.maintenance'),
url(r'^admin/m_delete$', 'ssd.dashboard.views.maintenance.m_delete'),
url(r'^admin/m_list$', 'ssd.dashboard.views.maintenance.m_list'),
url(r'^admin/m_email$', 'ssd.dashboard.views.maintenance.m_email'),
url(r'^admin/m_update$', 'ssd.dashboard.views.maintenance.m_update'),
url(r'^admin/m_update_delete$', 'ssd.dashboard.views.maintenance.m_update_delete'),
# Email Configuration (admin functionality)
url(r'^admin/email_config$', 'ssd.dashboard.views.email.email_config'),
url(r'^admin/email_recipients$', 'ssd.dashboard.views.email.email_recipients'),
url(r'^admin/recipient_delete$', 'ssd.dashboard.views.email.recipient_delete'),
url(r'^admin/recipient_modify$', 'ssd.dashboard.views.email.recipient_modify'),
# Services Configuration (admin functionality)
url(r'^admin/services$', 'ssd.dashboard.views.services.services'),
url(r'^admin/service_delete$', 'ssd.dashboard.views.services.service_delete'),
url(r'^admin/service_modify$', 'ssd.dashboard.views.services.service_modify'),
# Messages Configuration (admin functionality)
url(r'^admin/messages_config$', 'ssd.dashboard.views.messages.messages_config'),
# Logo Configuration (admin functionality)
url(r'^admin/logo_config$', 'ssd.dashboard.views.logo.logo_config'),
# Url Configuration (admin functionality)
url(r'^admin/systemurl_config$', 'ssd.dashboard.views.systemurl.systemurl_config'),
# Incident Reports (admin functionality)
url(r'^admin/ireport_config$', 'ssd.dashboard.views.ireport.ireport_config'),
url(r'^admin/ireport_detail$', 'ssd.dashboard.views.ireport.ireport_detail'),
url(r'^admin/ireport_delete$', 'ssd.dashboard.views.ireport.ireport_delete'),
url(r'^admin/ireport_list$', 'ssd.dashboard.views.ireport.ireport_list'),
# Escalation
url(r'^admin/escalation_config$', 'ssd.dashboard.views.escalation.escalation_config'),
url(r'^admin/escalation_contacts$', 'ssd.dashboard.views.escalation.escalation_contacts'),
url(r'^admin/contact_switch$', 'ssd.dashboard.views.escalation.contact_switch'),
url(r'^admin/contact_delete$', 'ssd.dashboard.views.escalation.contact_delete'),
url(r'^admin/contact_modify$', 'ssd.dashboard.views.escalation.contact_modify'),
# Events
url(r'^admin/update_modify$', 'ssd.dashboard.views.events.update_modify'),
)
urlpatterns += staticfiles_urlpatterns()
| apache-2.0 |
quattor/aquilon | tests/broker/test_constraints_network.py | 1 | 7564 | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestNetworkConstraints(TestBrokerCommand):
def test_100_add_testnet(self):
self.net.allocate_network(self, "bunker_mismatch1", 24, "unknown",
"building", "ut")
self.net.allocate_network(self, "bunker_mismatch2", 24, "unknown",
"bunker", "bucket1.ut")
def test_110_mismatch_1(self):
# Rack is bunkerized, network is not
net = self.net["bunker_mismatch1"]
ip = net.usable[0]
self.dsdb_expect_add("mismatch1.aqd-unittest.ms.com", ip,
"eth0_bunkertest",
primary="aquilon61.aqd-unittest.ms.com")
command = ["add_interface_address",
"--machine", "aquilon61.aqd-unittest.ms.com",
"--interface", "eth0", "--label", "bunkertest",
"--ip", ip, "--fqdn", "mismatch1.aqd-unittest.ms.com"]
err = self.statustest(command)
self.matchoutput(err,
"Bunker violation: rack ut9 is inside bunker "
"bucket2.ut, but network %s [%s] "
"is not bunkerized." % (net.name, net),
command)
self.dsdb_verify()
def test_120_mismatch_2(self):
# Rack and network has different bunkers
net = self.net["bunker_mismatch2"]
ip = net.usable[0]
self.dsdb_expect_add("mismatch2.aqd-unittest.ms.com", ip,
"eth0_bunkertest",
primary="aquilon62.aqd-unittest.ms.com")
command = ["add_interface_address",
"--machine", "aquilon62.aqd-unittest.ms.com",
"--interface", "eth0", "--label", "bunkertest",
"--ip", ip, "--fqdn", "mismatch2.aqd-unittest.ms.com"]
err = self.statustest(command)
self.matchoutput(err,
"Bunker violation: rack ut9 is inside bunker "
"bucket2.ut, but network %s [%s] is inside "
"bunker bucket1.ut." % (net.name, net),
command)
self.dsdb_verify()
def test_130_mismatch_3(self):
# Network is bunkerized, rack is not
net = self.net["bunker_mismatch2"]
ip = net.usable[1]
self.dsdb_expect_add("mismatch3.aqd-unittest.ms.com", ip,
"eth0_bunkertest",
primary="server9.aqd-unittest.ms.com")
command = ["add_interface_address",
"--machine", "server9.aqd-unittest.ms.com",
"--interface", "eth0", "--label", "bunkertest",
"--ip", net.usable[1],
"--fqdn", "mismatch3.aqd-unittest.ms.com"]
err = self.statustest(command)
self.matchoutput(err,
"Bunker violation: network %s [%s] is "
"inside bunker bucket1.ut, but rack ut8 is not inside "
"a bunker." % (net.name, net),
command)
self.dsdb_verify()
def test_200_show_bunker_violations(self):
command = ["show_bunker_violations"]
out = self.commandtest(command)
self.searchoutput(out,
r"Warning: Rack ut8 is not part of a bunker, but it "
r"uses bunkerized networks:\s*"
r"BUCKET1: server9\.aqd-unittest\.ms\.com/eth0\s*"
r"BUCKET2: aquilon91\.aqd-unittest\.ms\.com/eth0, server9\.aqd-unittest\.ms\.com/eth0",
command)
self.matchoutput(out, "aq update rack --rack np7 --building np",
command)
self.searchoutput(out,
r"Warning: Rack ut9 is part of bunker bucket2.ut, but "
r"also has networks from:\s*"
r"\(No bucket\): aquilon61\.aqd-unittest\.ms\.com/eth0\s*"
r"BUCKET1: aquilon62\.aqd-unittest\.ms\.com/eth0",
command)
def test_210_show_bunker_violations_management(self):
command = ["show_bunker_violations", "--management_interfaces"]
out = self.commandtest(command)
self.searchoutput(out,
r"Warning: Rack ut8 is not part of a bunker, but it "
r"uses bunkerized networks:\s*"
r"BUCKET1: server9\.aqd-unittest\.ms\.com/eth0\s*"
r"BUCKET2: aquilon91\.aqd-unittest\.ms\.com/eth0, server9\.aqd-unittest\.ms\.com/eth0",
command)
self.matchoutput(out, "aq update rack --rack np7 --building np",
command)
self.searchoutput(out,
r"Warning: Rack ut9 is part of bunker bucket2.ut, but "
r"also has networks from:\s*"
r"\(No bucket\): aquilon61\.aqd-unittest\.ms\.com/eth0, "
r"aquilon61.aqd-unittest.ms.com/ilo, .*$\s*"
r"BUCKET1: aquilon62\.aqd-unittest\.ms\.com/eth0",
command)
def test_300_mismatch1_cleanup(self):
net = self.net["bunker_mismatch1"]
ip = net.usable[0]
self.dsdb_expect_delete(ip)
command = ["del_interface_address",
"--machine", "aquilon61.aqd-unittest.ms.com",
"--interface", "eth0", "--label", "bunkertest"]
self.noouttest(command)
self.dsdb_verify()
def test_300_mismatch2_cleanup(self):
net = self.net["bunker_mismatch2"]
ip = net.usable[0]
self.dsdb_expect_delete(ip)
command = ["del_interface_address",
"--machine", "aquilon62.aqd-unittest.ms.com",
"--interface", "eth0", "--label", "bunkertest"]
self.noouttest(command)
self.dsdb_verify()
def test_300_mismatch3_cleanup(self):
net = self.net["bunker_mismatch2"]
ip = net.usable[1]
self.dsdb_expect_delete(ip)
command = ["del_interface_address",
"--machine", "server9.aqd-unittest.ms.com",
"--interface", "eth0", "--label", "bunkertest"]
self.statustest(command)
self.dsdb_verify()
def test_310_network_cleanup(self):
self.net.dispose_network(self, "bunker_mismatch1")
self.net.dispose_network(self, "bunker_mismatch2")
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestNetworkConstraints)
unittest.TextTestRunner(verbosity=2).run(suite)
| apache-2.0 |
ifduyue/sentry | src/sentry/api/endpoints/organization_member_details.py | 2 | 10426 | from __future__ import absolute_import
from django.db import transaction
from django.db.models import Q
from rest_framework import serializers
from rest_framework.response import Response
from sentry import roles
from sentry.api.bases.organization import (
OrganizationEndpoint, OrganizationPermission)
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize, RoleSerializer, OrganizationMemberWithTeamsSerializer
from sentry.api.serializers.rest_framework import ListField
from sentry.auth.superuser import is_active_superuser
from sentry.models import (
AuditLogEntryEvent, AuthIdentity, AuthProvider, OrganizationMember, OrganizationMemberTeam, Team, TeamStatus)
from sentry.signals import sso_enabled
ERR_NO_AUTH = 'You cannot remove this member with an unauthenticated API request.'
ERR_INSUFFICIENT_ROLE = 'You cannot remove a member who has more access than you.'
ERR_INSUFFICIENT_SCOPE = 'You are missing the member:admin scope.'
ERR_ONLY_OWNER = 'You cannot remove the only remaining owner of the organization.'
ERR_UNINVITABLE = 'You cannot send an invitation to a user who is already a full member.'
def get_allowed_roles(request, organization, member=None):
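    """Return (can_admin, allowed_roles): whether this request may manage
    members, and the roles it may assign (capped at the acting member's
    own role, or every role for an active superuser)."""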
can_admin = request.access.has_scope('member:admin')
allowed_roles = []
if can_admin and not is_active_superuser(request):
acting_member = member or OrganizationMember.objects.get(
user=request.user,
organization=organization,
)
if member and roles.get(acting_member.role).priority < roles.get(member.role).priority:
can_admin = False
else:
allowed_roles = [
r for r in roles.get_all()
if r.priority <= roles.get(acting_member.role).priority
]
can_admin = bool(allowed_roles)
elif is_active_superuser(request):
allowed_roles = roles.get_all()
return (can_admin, allowed_roles, )
class OrganizationMemberSerializer(serializers.Serializer):
reinvite = serializers.BooleanField()
regenerate = serializers.BooleanField()
role = serializers.ChoiceField(choices=roles.get_choices(), required=True)
teams = ListField(required=False, allow_null=False)
class RelaxedMemberPermission(OrganizationPermission):
scope_map = {
'GET': ['member:read', 'member:write', 'member:admin'],
'POST': ['member:write', 'member:admin'],
'PUT': ['member:write', 'member:admin'],
# DELETE checks for role comparison as you can either remove a member
# with a lower access role, or yourself, without having the req. scope
'DELETE': ['member:read', 'member:write', 'member:admin'],
}
class OrganizationMemberDetailsEndpoint(OrganizationEndpoint):
permission_classes = [RelaxedMemberPermission]
def _get_member(self, request, organization, member_id):
if member_id == 'me':
queryset = OrganizationMember.objects.filter(
organization=organization,
user__id=request.user.id,
user__is_active=True,
)
else:
queryset = OrganizationMember.objects.filter(
Q(user__is_active=True) | Q(user__isnull=True),
organization=organization,
id=member_id,
)
return queryset.select_related('user').get()
def _is_only_owner(self, member):
if member.role != roles.get_top_dog().id:
return False
queryset = OrganizationMember.objects.filter(
organization=member.organization_id,
role=roles.get_top_dog().id,
user__isnull=False,
user__is_active=True,
).exclude(id=member.id)
if queryset.exists():
return False
return True
def _serialize_member(self, member, request, allowed_roles=None):
context = serialize(
member,
serializer=OrganizationMemberWithTeamsSerializer()
)
if request.access.has_scope('member:admin'):
context['invite_link'] = member.get_invite_link()
context['isOnlyOwner'] = self._is_only_owner(member)
context['roles'] = serialize(
roles.get_all(), serializer=RoleSerializer(), allowed_roles=allowed_roles)
return context
def get(self, request, organization, member_id):
"""Currently only returns allowed invite roles for member invite"""
try:
member = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
_, allowed_roles = get_allowed_roles(request, organization, member)
context = self._serialize_member(member, request, allowed_roles)
return Response(context)
def put(self, request, organization, member_id):
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
serializer = OrganizationMemberSerializer(
data=request.DATA, partial=True)
if not serializer.is_valid():
return Response(status=400)
try:
auth_provider = AuthProvider.objects.get(organization=organization)
auth_provider = auth_provider.get_provider()
except AuthProvider.DoesNotExist:
auth_provider = None
allowed_roles = None
result = serializer.object
# XXX(dcramer): if/when this expands beyond reinvite we need to check
# access level
if result.get('reinvite'):
if om.is_pending:
if result.get('regenerate'):
if request.access.has_scope('member:admin'):
om.update(token=om.generate_token())
else:
return Response({'detail': ERR_INSUFFICIENT_SCOPE}, status=400)
om.send_invite_email()
elif auth_provider and not getattr(om.flags, 'sso:linked'):
om.send_sso_link_email(request.user, auth_provider)
else:
# TODO(dcramer): proper error message
return Response({'detail': ERR_UNINVITABLE}, status=400)
if auth_provider:
sso_enabled.send(organization=organization, sender=request.user)
if result.get('teams'):
# dupe code from member_index
# ensure listed teams are real teams
teams = list(Team.objects.filter(
organization=organization,
status=TeamStatus.VISIBLE,
slug__in=result['teams'],
))
if len(set(result['teams'])) != len(teams):
return Response({'teams': 'Invalid team'}, status=400)
with transaction.atomic():
# teams may be empty
OrganizationMemberTeam.objects.filter(
organizationmember=om).delete()
OrganizationMemberTeam.objects.bulk_create(
[
OrganizationMemberTeam(
team=team, organizationmember=om)
for team in teams
]
)
if result.get('role'):
_, allowed_roles = get_allowed_roles(request, organization)
allowed_role_ids = {r.id for r in allowed_roles}
# A user cannot promote others above themselves
if result['role'] not in allowed_role_ids:
return Response(
{'role': 'You do not have permission to assign the given role.'}, status=403)
# A user cannot demote a superior
if om.role not in allowed_role_ids:
return Response(
{'role': 'You do not have permission to assign a role to the given user.'}, status=403)
if om.user == request.user and (result['role'] != om.role):
return Response(
{'detail': 'You cannot make changes to your own role.'}, status=400)
om.update(role=result['role'])
self.create_audit_entry(
request=request,
organization=organization,
target_object=om.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_EDIT,
data=om.get_audit_log_data(),
)
context = self._serialize_member(om, request, allowed_roles)
return Response(context)
def delete(self, request, organization, member_id):
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if request.user.is_authenticated() and not is_active_superuser(request):
try:
acting_member = OrganizationMember.objects.get(
organization=organization,
user=request.user,
)
except OrganizationMember.DoesNotExist:
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
else:
if acting_member != om:
if not request.access.has_scope('member:admin'):
return Response({'detail': ERR_INSUFFICIENT_SCOPE}, status=400)
elif not roles.can_manage(acting_member.role, om.role):
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
# TODO(dcramer): do we even need this check?
elif not request.access.has_scope('member:admin'):
return Response({'detail': ERR_INSUFFICIENT_SCOPE}, status=400)
if self._is_only_owner(om):
return Response({'detail': ERR_ONLY_OWNER}, status=403)
audit_data = om.get_audit_log_data()
with transaction.atomic():
AuthIdentity.objects.filter(
user=om.user,
auth_provider__organization=organization,
).delete()
om.delete()
self.create_audit_entry(
request=request,
organization=organization,
target_object=om.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_REMOVE,
data=audit_data,
)
return Response(status=204)
| bsd-3-clause |
MrNuggles/HeyBoet-Telegram-Bot | temboo/Library/Highrise/DeletePeople.py | 5 | 3907 | # -*- coding: utf-8 -*-
###############################################################################
#
# DeletePeople
# Deletes a specified contact from your Highrise CRM.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeletePeople(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DeletePeople Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DeletePeople, self).__init__(temboo_session, '/Library/Highrise/DeletePeople')
def new_input_set(self):
return DeletePeopleInputSet()
def _make_result_set(self, result, path):
return DeletePeopleResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DeletePeopleChoreographyExecution(session, exec_id, path)
class DeletePeopleInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DeletePeople
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountName(self, value):
"""
Set the value of the AccountName input for this Choreo. ((required, string) A valid Highrise account name. This is the first part of the account's URL.)
"""
super(DeletePeopleInputSet, self)._set_input('AccountName', value)
def set_ContactID(self, value):
"""
        Set the value of the ContactID input for this Choreo. ((required, string) The ID number of the contact you want to delete. This is used to construct the URL for the request.)
"""
super(DeletePeopleInputSet, self)._set_input('ContactID', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) The Highrise account password. Use the value 'X' when specifying an API Key for the Username input.)
"""
super(DeletePeopleInputSet, self)._set_input('Password', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((required, string) A Highrise account username or API Key.)
"""
super(DeletePeopleInputSet, self)._set_input('Username', value)
class DeletePeopleResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DeletePeople Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Highrise. The delete people API method returns no XML, so this variable will contain no data.)
"""
return self._output.get('Response', None)
class DeletePeopleChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DeletePeopleResultSet(response, path)
| gpl-3.0 |
dreamsxin/kbengine | kbe/src/lib/python/Tools/scripts/rgrep.py | 112 | 1476 | #! /usr/bin/env python3
"""Reverse grep.
Usage: rgrep [-i] pattern file
"""
import sys
import re
import getopt
def main():
bufsize = 64 * 1024
reflags = 0
opts, args = getopt.getopt(sys.argv[1:], "i")
for o, a in opts:
if o == '-i':
reflags = reflags | re.IGNORECASE
if len(args) < 2:
usage("not enough arguments")
if len(args) > 2:
usage("exactly one file argument required")
pattern, filename = args
try:
prog = re.compile(pattern, reflags)
except re.error as msg:
usage("error in regular expression: %s" % msg)
try:
f = open(filename)
except IOError as msg:
usage("can't open %r: %s" % (filename, msg), 1)
f.seek(0, 2)
pos = f.tell()
leftover = None
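    # Scan the file backwards in bufsize chunks; a chunk usually starts
    # mid-line, so its first (partial) line is held back in `leftover` and
    # appended to the last line of the next, earlier chunk before matching.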
while pos > 0:
size = min(pos, bufsize)
pos = pos - size
f.seek(pos)
buffer = f.read(size)
lines = buffer.split("\n")
del buffer
if leftover is None:
if not lines[-1]:
del lines[-1]
else:
lines[-1] = lines[-1] + leftover
if pos > 0:
leftover = lines[0]
del lines[0]
else:
leftover = None
for line in reversed(lines):
if prog.search(line):
print(line)
def usage(msg, code=2):
sys.stdout = sys.stderr
print(msg)
print(__doc__)
sys.exit(code)
if __name__ == '__main__':
main()
| lgpl-3.0 |
openmaraude/APITaxi | APITaxi2/default_settings.py | 1 | 1390 | # SQLALCHEMY_ECHO = True
# A warning is displayed when SQLALCHEMY_TRACK_MODIFICATIONS is left at its default.
# Future SQLAlchemy versions will set this value to False by default anyway.
SQLALCHEMY_TRACK_MODIFICATIONS = False
INFLUXDB_DATABASE = 'taxis'
INFLUXDB_CREATE_DATABASE = False
_ONE_MINUTE = 60
_ONE_HOUR = _ONE_MINUTE * 60
_ONE_DAY = _ONE_HOUR * 24
_SEVEN_DAYS = _ONE_DAY * 7
CELERY_BEAT_SCHEDULE = {
'clean-geoindex-timestamps': {
'task': 'clean_geoindex_timestamps',
# Every 10 minutes
'schedule': _ONE_MINUTE * 10
},
# Every minute, store the list of taxis available the last minute.
'store-active-taxis-last-minute': {
'task': 'store_active_taxis',
'schedule': _ONE_MINUTE,
'args': (1,),
},
# Every hour, store the list of taxis available the last minute.
'store-active-taxis-last-hour': {
'task': 'store_active_taxis',
'schedule': _ONE_HOUR,
'args': (60,)
},
# Every day, store the list of taxis available the last day.
'store-active-taxis-last-day': {
'task': 'store_active_taxis',
'schedule': _ONE_DAY,
'args': (1440,)
},
# Every day, store the list of taxis available the last 7 days.
'store-active-taxis-last-seven-days': {
'task': 'store_active_taxis',
'schedule': _ONE_DAY,
'args': (10080,)
},
}
| agpl-3.0 |
denys-duchier/django | django/core/management/color.py | 43 | 1821 | """
Sets up the terminal color scheme.
"""
import functools
import os
import sys
from django.utils import termcolors
def supports_color():
"""
Return True if the running system's terminal supports color,
and False otherwise.
"""
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ)
# isatty is not always implemented, #6223.
is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
if not supported_platform or not is_a_tty:
return False
return True
class Style:
pass
def make_style(config_string=''):
"""
Create a Style object from the given config_string.
If config_string is empty django.utils.termcolors.DEFAULT_PALETTE is used.
"""
style = Style()
color_settings = termcolors.parse_color_setting(config_string)
# The nocolor palette has all available roles.
# Use that palette as the basis for populating
# the palette as defined in the environment.
for role in termcolors.PALETTES[termcolors.NOCOLOR_PALETTE]:
if color_settings:
format = color_settings.get(role, {})
style_func = termcolors.make_style(**format)
else:
def style_func(x):
return x
setattr(style, role, style_func)
# For backwards compatibility,
# set style for ERROR_OUTPUT == ERROR
style.ERROR_OUTPUT = style.ERROR
return style
@functools.lru_cache(maxsize=None)
def no_style():
"""
Return a Style object with no color scheme.
"""
return make_style('nocolor')
def color_style():
"""
Return a Style object from the Django color scheme.
"""
if not supports_color():
return no_style()
return make_style(os.environ.get('DJANGO_COLORS', ''))
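# Minimal self-check sketch (not part of the public API): running this
# module directly prints one line styled with the active palette.
if __name__ == '__main__':
    style = color_style()
    if supports_color():
        print(style.SUCCESS('terminal color support detected'))
    else:
        print(style.NOTICE('no terminal color support; using plain output'))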
| bsd-3-clause |
Lekanich/intellij-community | python/lib/Lib/ast.py | 139 | 11347 | # -*- coding: utf-8 -*-
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from _ast import __version__
def parse(expr, filename='<unknown>', mode='exec'):
"""
Parse an expression into an AST node.
Equivalent to compile(expr, filename, mode, PyCF_ONLY_AST).
"""
return compile(expr, filename, mode, PyCF_ONLY_AST)
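# For example, parse('x = 1') returns a Module node with one Assign
# statement in its body; compile(parse('x = 1'), '<ast>', 'exec') yields a
# code object equivalent to compiling the source string directly.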
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, Str):
return node.s
elif isinstance(node, Num):
return node.n
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, Name):
if node.id in _safe_names:
return _safe_names[node.id]
raise ValueError('malformed string')
return _convert(node_or_string)
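# For example, literal_eval("{'a': (1, 2), 'ok': True}") builds the
# corresponding dict, while literal_eval("__import__('os')") raises
# ValueError, since function calls are not literal structures.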
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node):
if isinstance(node, AST):
fields = [(a, _format(b)) for a, b in iter_fields(node)]
rv = '%s(%s' % (node.__class__.__name__, ', '.join(
('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)
))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
for a in node._attributes)
return rv + ')'
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
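# For example, dump(parse('x = 1').body[0]) returns
# "Assign(targets=[Name(id='x', ctx=Store())], value=Num(n=1))".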
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
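# Typical use: splice generated nodes into a parsed tree, then call
# fix_missing_locations(tree) so that compile() accepts nodes created
# without lineno/col_offset attributes.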
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
if 'lineno' in node._attributes:
node.lineno = getattr(node, 'lineno', 0) + n
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
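# For example, get_docstring(parse("def f():\n    'doc'").body[0])
# returns 'doc'.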
def walk(node):
"""
Recursively yield all child nodes of *node*, in no specified order. This is
useful if you only want to modify nodes in place and don't care about the
context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
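# Note: the deque gives walk() a breadth-first order, but as documented the
# traversal order is unspecified and callers should not rely on it.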
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `visit` method. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
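# A minimal subclass sketch: count attribute accesses in a tree.
#
#     class AttributeCounter(NodeVisitor):
#         def __init__(self):
#             self.count = 0
#         def visit_Attribute(self, node):
#             self.count += 1
#             self.generic_visit(node)
#
# AttributeCounter().visit(parse('a.b.c')) leaves count == 2.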
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
| apache-2.0 |