repo_name
stringlengths 5
92
| path
stringlengths 4
221
| copies
stringclasses 19
values | size
stringlengths 4
6
| content
stringlengths 766
896k
| license
stringclasses 15
values | hash
int64 -9,223,277,421,539,062,000
9,223,102,107B
| line_mean
float64 6.51
99.9
| line_max
int64 32
997
| alpha_frac
float64 0.25
0.96
| autogenerated
bool 1
class | ratio
float64 1.5
13.6
| config_test
bool 2
classes | has_no_keywords
bool 2
classes | few_assignments
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
rwl/PyCIM
|
CIM15/IEC61970/Informative/InfERPSupport/OrgErpPersonRole.py
|
1
|
3445
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Informative.InfCommon.Role import Role
class OrgErpPersonRole(Role):
    """Association role linking an ErpPerson with an ErpOrganisation.

    Setting either association end through its property keeps the reverse
    collection on the other object in sync.
    """

    def __init__(self, clientID='', ErpPerson=None, ErpOrganisation=None, *args, **kw_args):
        """Initialises a new 'OrgErpPersonRole' instance.

        @param clientID: Identifiers of the person held by an organisation,
            such as a government agency (federal, state, province, city,
            county), financial institutions, etc.
        @param ErpPerson:
        @param ErpOrganisation:
        """
        # Identifiers of the person held by an organisation, such as a
        # government agency, financial institution, etc.
        self.clientID = clientID

        # Wire up both association ends via the property setters below so
        # the reverse collections are maintained.
        self._ErpPerson = None
        self.ErpPerson = ErpPerson

        self._ErpOrganisation = None
        self.ErpOrganisation = ErpOrganisation

        super(OrgErpPersonRole, self).__init__(*args, **kw_args)

    _attrs = ["clientID"]
    _attr_types = {"clientID": str}
    _defaults = {"clientID": ''}
    _enums = {}
    _refs = ["ErpPerson", "ErpOrganisation"]
    _many_refs = []

    def getErpPerson(self):
        return self._ErpPerson

    def setErpPerson(self, value):
        # Detach this role from the previous person's collection first.
        previous = self._ErpPerson
        if previous is not None:
            previous._ErpOrganisationRoles = [
                role for role in previous.ErpOrganisationRoles if role != self
            ]
        self._ErpPerson = value
        if value is not None and self not in value._ErpOrganisationRoles:
            value._ErpOrganisationRoles.append(self)

    ErpPerson = property(getErpPerson, setErpPerson)

    def getErpOrganisation(self):
        return self._ErpOrganisation

    def setErpOrganisation(self, value):
        # Detach this role from the previous organisation's collection first.
        previous = self._ErpOrganisation
        if previous is not None:
            previous._ErpPersonRoles = [
                role for role in previous.ErpPersonRoles if role != self
            ]
        self._ErpOrganisation = value
        if value is not None and self not in value._ErpPersonRoles:
            value._ErpPersonRoles.append(self)

    ErpOrganisation = property(getErpOrganisation, setErpOrganisation)
|
mit
| -5,857,759,180,858,380,000
| 40.506024
| 175
| 0.697823
| false
| 3.844866
| false
| false
| false
|
OpenMined/PySyft
|
packages/grid/apps/worker/src/main/core/groups/group_ops.py
|
1
|
2683
|
# stdlib
from datetime import datetime
from datetime import timedelta
from json import dumps
from json import loads
from json.decoder import JSONDecodeError
import logging
from secrets import token_hex
# grid relative
from ..codes import RESPONSE_MSG
from ..database import Group
from ..database import Role
from ..database import User
from ..database import UserGroup
from ..database import db
from ..database.utils import model_to_json
from ..exceptions import AuthorizationError
from ..exceptions import GroupNotFoundError
from ..exceptions import InvalidCredentialsError
from ..exceptions import MissingRequestKeyError
from ..exceptions import PyGridError
from ..exceptions import RoleNotFoundError
from ..exceptions import UserNotFoundError
def create_group(current_user, name):
    """Create a new group named *name* and return it as a JSON-able dict.

    Raises RoleNotFoundError when the caller's role does not exist and
    AuthorizationError when that role lacks the can_create_groups flag.
    """
    role = Role.query.get(current_user.role)
    if role is None:
        raise RoleNotFoundError
    if not role.can_create_groups:
        raise AuthorizationError

    group = Group(name=name)
    db.session.add(group)
    db.session.commit()
    return model_to_json(group)
def get_group(current_user, group_id):
    """Fetch the group with id *group_id* as a JSON-able dict.

    Raises RoleNotFoundError / AuthorizationError for permission problems
    and GroupNotFoundError when the id does not exist.
    """
    role = Role.query.get(current_user.role)
    if role is None:
        raise RoleNotFoundError
    if not role.can_triage_requests:
        raise AuthorizationError

    group = Group.query.get(group_id)
    if group is None:
        raise GroupNotFoundError
    return model_to_json(group)
def get_all_groups(current_user):
    """Return every group as a list of JSON-able dicts.

    Raises RoleNotFoundError / AuthorizationError for permission problems.
    """
    role = Role.query.get(current_user.role)
    if role is None:
        raise RoleNotFoundError
    if not role.can_triage_requests:
        raise AuthorizationError

    return [model_to_json(group) for group in Group.query.all()]
def put_group(current_user, group_id, new_fields):
    """Update the group *group_id* from the mapping *new_fields*.

    Returns the updated group as a JSON-able dict. Raises
    RoleNotFoundError / AuthorizationError for permission problems and
    GroupNotFoundError when the id does not exist.
    """
    # Consistency fix: the sibling functions in this module use the
    # ``Model.query`` shortcut; ``db.session.query(Model)`` is equivalent
    # but was used inconsistently here.
    user_role = Role.query.get(current_user.role)
    if user_role is None:
        raise RoleNotFoundError
    if not user_role.can_create_groups:
        raise AuthorizationError

    group = Group.query.get(group_id)
    if group is None:
        raise GroupNotFoundError

    # NOTE(review): keys from the request are applied verbatim with
    # setattr (mass assignment) — confirm upstream validation restricts
    # which fields a caller may set.
    for key, value in new_fields.items():
        setattr(group, key, value)

    db.session.commit()
    return model_to_json(group)
def delete_group(current_user, group_id):
    """Delete the group *group_id* and return its last state as a dict.

    Raises RoleNotFoundError / AuthorizationError for permission problems
    and GroupNotFoundError when the id does not exist.
    """
    # Consistency fix: use the ``Model.query`` shortcut like the sibling
    # functions in this module instead of ``db.session.query(Model)``.
    user_role = Role.query.get(current_user.role)
    if user_role is None:
        raise RoleNotFoundError
    if not user_role.can_create_groups:
        raise AuthorizationError

    group = Group.query.get(group_id)
    if group is None:
        raise GroupNotFoundError

    db.session.delete(group)
    db.session.commit()
    # The ORM object is still usable after delete+commit for serialising
    # the response payload.
    return model_to_json(group)
|
apache-2.0
| -8,161,439,248,849,288,000
| 24.798077
| 61
| 0.71748
| false
| 3.905386
| false
| false
| false
|
priendeau/PyNOAAGeoMagIndiceHandler
|
build/lib.linux-x86_64-2.6/PyNOAAGeoMagIndiceHandler/GeoMagReferences.py
|
1
|
8642
|
from __future__ import with_statement
import os, sys, re, pynav, time, datetime, pytz ,pyaeso, spharm, matplotlib,xml_marshaller, xmlbuilder
from xml_marshaller import xml_marshaller
from xml_marshaller.xml_marshaller import *
from xmlbuilder import XMLBuilder
import numpy as np
from pynav import Pynav
from pyaeso import ets
from bctc import BC_TZ
from bctc.load import yield_load_points
from PyNOAAGeoMagIndiceHandler import decorator
from decorator import DictAssign
class GeoMagReferences(object):
    """Registry of real-time solar / geomagnetic data sources.

    Wraps a nested dictionary of instruments and their data URLs (the
    sk-ta3 neutron monitor, ACE SWEPAM, STEREO A/B magnetometers) behind
    a set of properties that drill into the structure one level at a time.
    """

    NodeUpdate = None

    class GeoMagReferenceImpl(object):
        """Implementation node holding the reference dictionaries and the
        property machinery used to navigate them."""

        FieldReference = {}
        SatelliteName = None
        LapsInterleave = None

        # Schema description of RealTimeSolarIndiceReference below.
        # NOTE(review): 'localtion' is kept as-is (sic) — it is a data key
        # that other code may look up by this exact spelling.
        DictReference = {
            'field': {
                'name': 'dict',
                'value': ['RealTimeSolarIndiceReference'],
                'dict': {
                    'name': 'position',
                    'value': ['system'],
                    'position': {
                        'name': 'localtion',
                        'value': ['earth', 'sonde', 'satellite'], },
                    'localtion': {
                        'name': 'site',
                        'value': ['sk-ta3', 'ace', 'stereo-a', 'stereo-b']},
                    'site': {
                        'name': 'detector',
                        'value': ['neutronmonitor', 'swepam', 'magnetometer'],
                        'detector': {
                            'name': ['stringfield', 'listfield', 'collectionfield'],
                            'value': ['title', 'field', 'laps', 'url', '1m', '5m', '1h', '12h', '24h', '1w', '1m', '1y', '2y'],
                            'stringfield': {
                                'name': 'str',
                                'value': ['title', 'url']},
                            'listfield': {
                                'name': 'list',
                                'value': ['field']},
                            'collectionfield': {
                                'name': 'dict',
                                'value': ['laps', '1m', '5m', '1h', '12h', '24h', '1w', '1m', '1y', '2y']}
                        }
                    }
                }
            }
        }

        # Actual data: system -> location -> site -> instrument -> laps -> url.
        RealTimeSolarIndiceReference = {
            'system': {
                'earth': {
                    'sk-ta3': {
                        'neutronmonitor': {
                            'laps': {
                                '1m': {'url': 'http://neutronmonitor.ta3.sk/ascii.php?filename=/data/6h.dat'},
                                '5m': {'url': 'http://neutronmonitor.ta3.sk/ascii.php?filename=/data/24h.dat'},
                                '1h': {'url': 'http://neutronmonitor.ta3.sk/ascii.php?filename=/data/30d.dat'}
                            }
                        }
                    }
                },
                'satellite': {
                    'ace': {
                        'swepam': {
                            'title': 'Solar Wind Electron Proton Alpha Monitor',
                            'field': ['UT Date YR', 'UT Date MO', 'UT Date DA', 'UT Date HHMM', 'Modified Julian Day', 'Seconds of the Day', 'S', 'Density', 'Speed', 'Temperature'],
                            'laps': {
                                '1m': {'url': "http://www.swpc.noaa.gov/ftpdir/lists/ace/ace_swepam_1m.txt"}
                            }
                        }
                    },
                    'stereo-a': {
                        'name': {
                            'a': {
                                'field': ['UT Date YR', 'UT Date MO', 'UT Date DA', 'UT Date HHMM', 'Modified Julian Day', 'Seconds of the Day', 'S', 'BR', 'BT', 'BN', 'Bt', 'Lat.', 'Long.'],
                                'magnetometer': {
                                    'laps': {
                                        '1m': {
                                            'url': "http://www.swpc.noaa.gov/ftpdir/lists/stereo/sta_mag_1m.txt"}
                                    }
                                }
                            }
                        }
                    },
                    'stereo-b': {
                        'name': {
                            'a': {
                                'field': ['UT Date YR', 'UT Date MO', 'UT Date DA', 'UT Date HHMM', 'Modified Julian Day', 'Seconds of the Day', 'S', 'BR', 'BT', 'BN', 'Bt', 'Lat.', 'Long.'],
                                'magnetometer': {
                                    'laps': {
                                        '1m': {
                                            'url': "http://www.swpc.noaa.gov/ftpdir/lists/stereo/stb_mag_1m.txt"}
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }

        RootName = None
        RootNameContent = None

        @DictAssign('RealTimeSolarIndiceReference')
        def GetRoot(self):
            return self.RootName, self.RootNameContent

        @DictAssign('RealTimeSolarIndiceReference')
        def SetRoot(self, value):
            # NOTE(review): PropertyDictName is presumably injected by the
            # DictAssign decorator; the access is kept in case it has side
            # effects even though the local is unused — TODO confirm.
            DictRef = self.PropertyDictName
            self.RootName = value
            self.RootNameContent = self.RealTimeSolarIndiceReference[self.RootName]

        PropertyRoot = property(GetRoot, SetRoot)

        CollectionType = None
        CollectionTypeContent = None

        @DictAssign('RealTimeSolarIndiceReference')
        def GetCollectionType(self):
            return self.CollectionType, self.CollectionTypeContent

        @DictAssign('RealTimeSolarIndiceReference')
        def SetCollectionType(self, value):
            self.CollectionType = value
            self.CollectionTypeContent = self.RealTimeSolarIndiceReference[self.RootName][self.CollectionType]

        PropertyCollectionType = property(GetCollectionType, SetCollectionType)

        CollectionName = None
        CollectionNameContent = None

        @DictAssign('RealTimeSolarIndiceReference')
        def GetCollectionName(self):
            # BUG FIX: 'CollectionNameContent' was referenced without
            # 'self.', which raised NameError on every call.
            return self.CollectionName, self.CollectionNameContent

        @DictAssign('RealTimeSolarIndiceReference')
        def SetCollectionName(self, value):
            self.CollectionName = value
            self.CollectionNameContent = self.RealTimeSolarIndiceReference[self.RootName][self.CollectionType][self.CollectionName]

        PropertyCollectionName = property(GetCollectionName, SetCollectionName)

        CollectionSection = None
        CollectionSectionContent = None

        @DictAssign('RealTimeSolarIndiceReference')
        def GetCollectionSection(self):
            return self.CollectionSection, self.CollectionSectionContent

        @DictAssign('RealTimeSolarIndiceReference')
        def SetCollectionSection(self, value):
            self.CollectionSection = value
            self.CollectionSectionContent = self.RealTimeSolarIndiceReference[self.RootName][self.CollectionType][self.CollectionName][self.CollectionSection]

        PropertyCollectionSection = property(GetCollectionSection, SetCollectionSection)

        InstrumentName = None
        InstrumentNameContent = None

        @DictAssign('RealTimeSolarIndiceReference')
        def GetInstrumentName(self):
            return self.InstrumentName, self.InstrumentNameContent

        @DictAssign('RealTimeSolarIndiceReference')
        def SetInstrumentName(self, value):
            self.InstrumentName = value
            # NOTE(review): this stores the same content as
            # SetCollectionSection and never uses InstrumentName in the
            # lookup — looks wrong, but the intended extra key is not
            # visible from this file, so behaviour is left unchanged.
            self.InstrumentNameContent = self.RealTimeSolarIndiceReference[self.RootName][self.CollectionType][self.CollectionName][self.CollectionSection]

        PropertyInstrumentName = property(GetInstrumentName, SetInstrumentName)

        RTSIR = None
        RTSIRContent = None

        @DictAssign('RealTimeSolarIndiceReference')
        def GetRTSIR(self):
            return self.RTSIR

        @DictAssign('RealTimeSolarIndiceReference')
        def SetRTSIR(self, value):
            # Assigning through the properties keeps each *Content slot in
            # sync with the plain name attributes.
            self.PropertyRoot, self.PropertyCollectionType, self.PropertyCollectionName, self.PropertyCollectionSection = value
            # BUG FIX: 'MainDict' was undefined (NameError) and the
            # Property* reads return (name, content) tuples, which are not
            # valid keys here; index RealTimeSolarIndiceReference with the
            # plain name attributes set by the property setters above.
            self.RTSIR = self.RealTimeSolarIndiceReference[self.RootName][self.CollectionType][self.CollectionName][self.CollectionSection]

        PropertyRTSIR = property(GetRTSIR, SetRTSIR)

        ### Property By Instrument:
        FieldName = None

        def GetFieldName(self):
            return self.RTSIR['field']

        def SetFieldName(self, value):
            self.FieldName = value
            self.RTSIR['field'] = self.FieldName

        PropertyFieldName = property(GetFieldName, SetFieldName)

        LapsValue = None

        def GetLapsValue(self):
            # Returns the laps entry selected by the last SetLapsValue call.
            return self.RTSIR['laps'][self.LapsValue]

        def SetLapsValue(self, value):
            self.LapsValue = value

        PropertyLapsValue = property(GetLapsValue, SetLapsValue)

        UrlName = None
        UrlContent = None

        def GetUrlName(self):
            return self.UrlContent

        def SetUrlName(self, value):
            # Accepts either a (laps, name) pair or a bare name.
            if len(value) == 2:
                self.LapsValue, self.UrlName = value
            else:
                self.UrlName = value
            if self.UrlName:
                self.UrlContent = self.RTSIR['laps'][self.LapsValue]['url']

        PropertyUrlName = property(GetUrlName, SetUrlName)

        Title = None

        def GetTitle(self):
            return self.Title

        def SetTitle(self, value):
            # BUG FIX: the original evaluated a bare dictionary lookup and
            # discarded the result, so the title was never stored; keep the
            # assigned value so GetTitle can return it.
            if value:
                self.Title = value

        PropertyTitle = property(GetTitle, SetTitle)

    ###self.PropertyInstrumentName, self.PropertyFieldName, self.PropertyLapsValue

    def __init__(self, **Kargs):
        """Create the implementation node and apply keyword overrides."""
        # BUG FIX: the node was held only in a local variable while the
        # attributes were set on the nonexistent 'self.Node'
        # (AttributeError); store the node on the instance first.
        self.Node = self.GeoMagReferenceImpl()
        for ItemKey in Kargs.keys():
            setattr(self.Node, ItemKey, Kargs[ItemKey])

    def UpdateReference(self):
        # NOTE(review): only constructs the fetcher; no update logic is
        # implemented in this file.
        self.UrlNav = pynav.Pynav()
# Smoke entry point: build a reference registry when run as a script.
if __name__ == '__main__':
    AGeoLocE = GeoMagReferences()
|
bsd-3-clause
| -6,327,240,926,643,782,000
| 33.293651
| 171
| 0.5891
| false
| 3.856314
| false
| false
| false
|
tarasane/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_NOPASS_random_attack_medium.py
|
1
|
4940
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
import random
def random_attack():
    """Randomized stress test for H2O GLM (Python 2 / legacy h2o API).

    Builds prostate and cars train/validation splits, then for each GLM
    family runs 10 attacks with randomly chosen parameter combinations.
    """

    def attack(family, train, valid, x, y):
        # Assemble a random kwargs dict for h2o.glm and run one model.
        # x/y are column indices; train/valid are H2OFrames.
        kwargs = {}
        kwargs['family'] = family
        gaussian_links = ["inverse", "log", "identity"]
        binomial_links = ["logit"]
        poisson_links = ["log", "identity"]
        gamma_links = ["inverse", "log", "identity"]

        # randomly select parameters and their corresponding values
        if random.randint(0,1): kwargs['max_iterations'] = random.randint(1,50)
        if random.random() > 0.8: kwargs['beta_epsilon'] = random.random()
        if random.randint(0,1): kwargs['solver'] = ["IRLSM", "L_BFGS"][random.randint(0,1)]
        if random.randint(0,1): kwargs['standardize'] = [True, False][random.randint(0,1)]
        if random.randint(0,1):
            # Pick a link function valid for the chosen family.
            if family == "gaussian": kwargs['link'] = gaussian_links[random.randint(0,2)]
            elif family == "binomial": kwargs['link'] = binomial_links[random.randint(0,0)]
            elif family == "poisson" : kwargs['link'] = poisson_links[random.randint(0,1)]
            elif family == "gamma" : kwargs['link'] = gamma_links[random.randint(0,2)]
        if random.randint(0,1): kwargs['alpha'] = [random.random()]
        if family == "binomial":
            if random.randint(0,1): kwargs['prior'] = random.random()
        if random.randint(0,1): kwargs['lambda_search'] = [True, False][random.randint(0,1)]
        if 'lambda_search' in kwargs.keys():
            if random.randint(0,1): kwargs['nlambdas'] = random.randint(2,10)
        do_validation = [True, False][random.randint(0,1)]
        # beta constraints
        if random.randint(0,1):
            # Random lower/upper bound per predictor column.
            bc = []
            for n in x:
                name = train.names[n]
                lower_bound = random.uniform(-1,1)
                upper_bound = lower_bound + random.random()
                bc.append([name, lower_bound, upper_bound])
            beta_constraints = h2o.H2OFrame(python_obj=bc)
            beta_constraints.setNames(['names', 'lower_bounds', 'upper_bounds'])
            kwargs['beta_constraints'] = beta_constraints.send_frame()

        # display the parameters and their corresponding values
        print "-----------------------"
        print "x: {0}".format(x)
        print "y: {0}".format(y)
        print "validation: {0}".format(do_validation)
        for k, v in zip(kwargs.keys(), kwargs.values()):
            if k == 'beta_constraints':
                print k + ": "
                beta_constraints.show()
            else:
                print k + ": {0}".format(v)

        # The model itself is the test: success == no exception raised.
        if do_validation: h2o.glm(x=train[x], y=train[y], validation_x=valid[x], validation_y=valid[y], **kwargs)
        else: h2o.glm(x=train[x], y=train[y], **kwargs)
        print "-----------------------"

    print "Import and data munging..."
    pros = h2o.upload_file(h2o.locate("smalldata/prostate/prostate.csv.zip"))
    pros[1] = pros[1].asfactor()  # response column for binomial runs
    r = pros[0].runif() # a column of length pros.nrow with values between 0 and 1
    # ~80/20 train/validation split
    pros_train = pros[r > .2]
    pros_valid = pros[r <= .2]

    cars = h2o.upload_file(h2o.locate("smalldata/junit/cars.csv"))
    r = cars[0].runif()
    cars_train = cars[r > .2]
    cars_valid = cars[r <= .2]

    print
    print "======================================================================"
    print "============================== Binomial =============================="
    print "======================================================================"
    for i in range(10):
        attack("binomial", pros_train, pros_valid, random.sample([2,3,4,5,6,7,8],random.randint(1,7)), 1)

    print
    print "======================================================================"
    print "============================== Gaussian =============================="
    print "======================================================================"
    for i in range(10):
        attack("gaussian", cars_train, cars_valid, random.sample([2,3,4,5,6,7],random.randint(1,6)), 1)

    print
    print "======================================================================"
    print "============================== Poisson =============================="
    print "======================================================================"
    for i in range(10):
        attack("poisson", cars_train, cars_valid, random.sample([1,3,4,5,6,7],random.randint(1,6)), 2)

    print
    print "======================================================================"
    print "============================== Gamma =============================="
    print "======================================================================"
    for i in range(10):
        attack("gamma", pros_train, pros_valid, random.sample([1,2,3,5,6,7,8],random.randint(1,7)), 4)
# Run through the h2o test harness when executed directly.
if __name__ == "__main__":
    tests.run_test(sys.argv, random_attack)
|
apache-2.0
| -440,998,212,565,106,400
| 46.5
| 113
| 0.470445
| false
| 3.811728
| false
| false
| false
|
GoogleCloudPlatform/declarative-resource-client-library
|
python/services/networkservices/beta/endpoint_config_selector.py
|
1
|
21391
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.network_services import (
endpoint_config_selector_pb2,
)
from google3.cloud.graphite.mmv2.services.google.network_services import (
endpoint_config_selector_pb2_grpc,
)
from typing import List
class EndpointConfigSelector(object):
    """Client-side wrapper for the NetworkservicesBeta EndpointConfigSelector
    resource.

    Auto-generated DCL pattern: each field is copied between this object and
    the corresponding protobuf message; apply()/delete()/list() call the gRPC
    service stub over the shared channel.
    """

    def __init__(
        self,
        name: str = None,
        create_time: str = None,
        update_time: str = None,
        labels: dict = None,
        type: str = None,
        authorization_policy: str = None,
        http_filters: dict = None,
        endpoint_matcher: dict = None,
        traffic_port_selector: dict = None,
        description: str = None,
        server_tls_policy: str = None,
        client_tls_policy: str = None,
        project: str = None,
        location: str = None,
        service_account_file: str = "",
    ):
        # NOTE(review): create_time and update_time are accepted here but
        # never stored; they are only populated from the apply() response.
        channel.initialize()
        self.name = name
        self.labels = labels
        self.type = type
        self.authorization_policy = authorization_policy
        self.http_filters = http_filters
        self.endpoint_matcher = endpoint_matcher
        self.traffic_port_selector = traffic_port_selector
        self.description = description
        self.server_tls_policy = server_tls_policy
        self.client_tls_policy = client_tls_policy
        self.project = project
        self.location = location
        self.service_account_file = service_account_file

    def apply(self):
        """Create or update the resource server-side, then refresh every
        local field from the response message."""
        stub = endpoint_config_selector_pb2_grpc.NetworkservicesBetaEndpointConfigSelectorServiceStub(
            channel.Channel()
        )
        request = (
            endpoint_config_selector_pb2.ApplyNetworkservicesBetaEndpointConfigSelectorRequest()
        )
        # Copy each set field into the request; message-typed fields are
        # cleared explicitly when unset so the server sees them as absent.
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.labels):
            request.resource.labels = Primitive.to_proto(self.labels)

        if EndpointConfigSelectorTypeEnum.to_proto(self.type):
            request.resource.type = EndpointConfigSelectorTypeEnum.to_proto(self.type)

        if Primitive.to_proto(self.authorization_policy):
            request.resource.authorization_policy = Primitive.to_proto(
                self.authorization_policy
            )

        if EndpointConfigSelectorHttpFilters.to_proto(self.http_filters):
            request.resource.http_filters.CopyFrom(
                EndpointConfigSelectorHttpFilters.to_proto(self.http_filters)
            )
        else:
            request.resource.ClearField("http_filters")
        if EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher):
            request.resource.endpoint_matcher.CopyFrom(
                EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher)
            )
        else:
            request.resource.ClearField("endpoint_matcher")
        if EndpointConfigSelectorTrafficPortSelector.to_proto(
            self.traffic_port_selector
        ):
            request.resource.traffic_port_selector.CopyFrom(
                EndpointConfigSelectorTrafficPortSelector.to_proto(
                    self.traffic_port_selector
                )
            )
        else:
            request.resource.ClearField("traffic_port_selector")
        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if Primitive.to_proto(self.server_tls_policy):
            request.resource.server_tls_policy = Primitive.to_proto(
                self.server_tls_policy
            )

        if Primitive.to_proto(self.client_tls_policy):
            request.resource.client_tls_policy = Primitive.to_proto(
                self.client_tls_policy
            )

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)

        request.service_account_file = self.service_account_file

        response = stub.ApplyNetworkservicesBetaEndpointConfigSelector(request)
        # Mirror the server's view of the resource back onto this object.
        self.name = Primitive.from_proto(response.name)
        self.create_time = Primitive.from_proto(response.create_time)
        self.update_time = Primitive.from_proto(response.update_time)
        self.labels = Primitive.from_proto(response.labels)
        self.type = EndpointConfigSelectorTypeEnum.from_proto(response.type)
        self.authorization_policy = Primitive.from_proto(response.authorization_policy)
        self.http_filters = EndpointConfigSelectorHttpFilters.from_proto(
            response.http_filters
        )
        self.endpoint_matcher = EndpointConfigSelectorEndpointMatcher.from_proto(
            response.endpoint_matcher
        )
        self.traffic_port_selector = EndpointConfigSelectorTrafficPortSelector.from_proto(
            response.traffic_port_selector
        )
        self.description = Primitive.from_proto(response.description)
        self.server_tls_policy = Primitive.from_proto(response.server_tls_policy)
        self.client_tls_policy = Primitive.from_proto(response.client_tls_policy)
        self.project = Primitive.from_proto(response.project)
        self.location = Primitive.from_proto(response.location)

    def delete(self):
        """Delete the resource identified by this object's fields."""
        stub = endpoint_config_selector_pb2_grpc.NetworkservicesBetaEndpointConfigSelectorServiceStub(
            channel.Channel()
        )
        request = (
            endpoint_config_selector_pb2.DeleteNetworkservicesBetaEndpointConfigSelectorRequest()
        )
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.labels):
            request.resource.labels = Primitive.to_proto(self.labels)

        if EndpointConfigSelectorTypeEnum.to_proto(self.type):
            request.resource.type = EndpointConfigSelectorTypeEnum.to_proto(self.type)

        if Primitive.to_proto(self.authorization_policy):
            request.resource.authorization_policy = Primitive.to_proto(
                self.authorization_policy
            )

        if EndpointConfigSelectorHttpFilters.to_proto(self.http_filters):
            request.resource.http_filters.CopyFrom(
                EndpointConfigSelectorHttpFilters.to_proto(self.http_filters)
            )
        else:
            request.resource.ClearField("http_filters")
        if EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher):
            request.resource.endpoint_matcher.CopyFrom(
                EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher)
            )
        else:
            request.resource.ClearField("endpoint_matcher")
        if EndpointConfigSelectorTrafficPortSelector.to_proto(
            self.traffic_port_selector
        ):
            request.resource.traffic_port_selector.CopyFrom(
                EndpointConfigSelectorTrafficPortSelector.to_proto(
                    self.traffic_port_selector
                )
            )
        else:
            request.resource.ClearField("traffic_port_selector")
        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if Primitive.to_proto(self.server_tls_policy):
            request.resource.server_tls_policy = Primitive.to_proto(
                self.server_tls_policy
            )

        if Primitive.to_proto(self.client_tls_policy):
            request.resource.client_tls_policy = Primitive.to_proto(
                self.client_tls_policy
            )

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)

        # Response intentionally discarded (generated pattern).
        response = stub.DeleteNetworkservicesBetaEndpointConfigSelector(request)

    @classmethod
    def list(self, project, location, service_account_file=""):
        """List all resources in *project*/*location*; returns proto items."""
        stub = endpoint_config_selector_pb2_grpc.NetworkservicesBetaEndpointConfigSelectorServiceStub(
            channel.Channel()
        )
        request = (
            endpoint_config_selector_pb2.ListNetworkservicesBetaEndpointConfigSelectorRequest()
        )
        request.service_account_file = service_account_file
        request.Project = project

        request.Location = location

        return stub.ListNetworkservicesBetaEndpointConfigSelector(request).items

    def to_proto(self):
        """Serialize this object to its resource protobuf message."""
        resource = (
            endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelector()
        )
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.labels):
            resource.labels = Primitive.to_proto(self.labels)
        if EndpointConfigSelectorTypeEnum.to_proto(self.type):
            resource.type = EndpointConfigSelectorTypeEnum.to_proto(self.type)
        if Primitive.to_proto(self.authorization_policy):
            resource.authorization_policy = Primitive.to_proto(
                self.authorization_policy
            )
        if EndpointConfigSelectorHttpFilters.to_proto(self.http_filters):
            resource.http_filters.CopyFrom(
                EndpointConfigSelectorHttpFilters.to_proto(self.http_filters)
            )
        else:
            resource.ClearField("http_filters")
        if EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher):
            resource.endpoint_matcher.CopyFrom(
                EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher)
            )
        else:
            resource.ClearField("endpoint_matcher")
        if EndpointConfigSelectorTrafficPortSelector.to_proto(
            self.traffic_port_selector
        ):
            resource.traffic_port_selector.CopyFrom(
                EndpointConfigSelectorTrafficPortSelector.to_proto(
                    self.traffic_port_selector
                )
            )
        else:
            resource.ClearField("traffic_port_selector")
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.server_tls_policy):
            resource.server_tls_policy = Primitive.to_proto(self.server_tls_policy)
        if Primitive.to_proto(self.client_tls_policy):
            resource.client_tls_policy = Primitive.to_proto(self.client_tls_policy)
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.location):
            resource.location = Primitive.to_proto(self.location)
        return resource
class EndpointConfigSelectorHttpFilters(object):
    """Wrapper for the http_filters message of an endpoint config selector."""

    def __init__(self, http_filters: list = None):
        self.http_filters = http_filters

    @classmethod
    def to_proto(cls, resource):
        """Convert *resource* to its proto message; falsy input maps to None."""
        if not resource:
            return None
        res = (
            endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorHttpFilters()
        )
        filters = Primitive.to_proto(resource.http_filters)
        if filters:
            res.http_filters.extend(filters)
        return res

    @classmethod
    def from_proto(cls, resource):
        """Build a wrapper from a proto message; falsy input maps to None."""
        if not resource:
            return None
        return EndpointConfigSelectorHttpFilters(
            http_filters=Primitive.from_proto(resource.http_filters)
        )
class EndpointConfigSelectorHttpFiltersArray(object):
    """List converter for EndpointConfigSelectorHttpFilters messages."""

    @classmethod
    def to_proto(cls, resources):
        # Empty or None input is returned unchanged.
        if not resources:
            return resources
        return [
            EndpointConfigSelectorHttpFilters.to_proto(item) for item in resources
        ]

    @classmethod
    def from_proto(cls, resources):
        return [
            EndpointConfigSelectorHttpFilters.from_proto(item) for item in resources
        ]
class EndpointConfigSelectorEndpointMatcher(object):
    """Wrapper for the endpoint_matcher message."""

    def __init__(self, metadata_label_matcher: dict = None):
        self.metadata_label_matcher = metadata_label_matcher

    @classmethod
    def to_proto(cls, resource):
        """Convert *resource* to its proto message; falsy input maps to None."""
        if not resource:
            return None
        res = (
            endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcher()
        )
        matcher = EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.to_proto(
            resource.metadata_label_matcher
        )
        if matcher:
            res.metadata_label_matcher.CopyFrom(matcher)
        else:
            res.ClearField("metadata_label_matcher")
        return res

    @classmethod
    def from_proto(cls, resource):
        """Build a wrapper from a proto message; falsy input maps to None."""
        if not resource:
            return None
        matcher = EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.from_proto(
            resource.metadata_label_matcher
        )
        return EndpointConfigSelectorEndpointMatcher(metadata_label_matcher=matcher)
class EndpointConfigSelectorEndpointMatcherArray(object):
    """List converter for EndpointConfigSelectorEndpointMatcher messages."""

    @classmethod
    def to_proto(cls, resources):
        # Empty or None input is returned unchanged.
        if not resources:
            return resources
        return [
            EndpointConfigSelectorEndpointMatcher.to_proto(item) for item in resources
        ]

    @classmethod
    def from_proto(cls, resources):
        return [
            EndpointConfigSelectorEndpointMatcher.from_proto(item)
            for item in resources
        ]
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher(object):
    """Wrapper for the endpoint matcher's metadata_label_matcher message."""

    def __init__(
        self, metadata_label_match_criteria: str = None, metadata_labels: list = None
    ):
        self.metadata_label_match_criteria = metadata_label_match_criteria
        self.metadata_labels = metadata_labels

    @classmethod
    def to_proto(cls, resource):
        """Convert *resource* to its proto message; falsy input maps to None."""
        if not resource:
            return None
        res = (
            endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcher()
        )
        criteria = EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.to_proto(
            resource.metadata_label_match_criteria
        )
        if criteria:
            res.metadata_label_match_criteria = criteria
        labels = EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray.to_proto(
            resource.metadata_labels
        )
        if labels:
            res.metadata_labels.extend(labels)
        return res

    @classmethod
    def from_proto(cls, resource):
        """Build a wrapper from a proto message; falsy input maps to None."""
        if not resource:
            return None
        criteria = EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.from_proto(
            resource.metadata_label_match_criteria
        )
        labels = EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray.from_proto(
            resource.metadata_labels
        )
        return EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher(
            metadata_label_match_criteria=criteria,
            metadata_labels=labels,
        )
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherArray(object):
    """List converter for MetadataLabelMatcher messages."""

    @classmethod
    def to_proto(cls, resources):
        # Empty or None input is returned unchanged.
        if not resources:
            return resources
        return [
            EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.to_proto(item)
            for item in resources
        ]

    @classmethod
    def from_proto(cls, resources):
        return [
            EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.from_proto(item)
            for item in resources
        ]
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels(object):
    """Wrapper for a single metadata label (name/value pair)."""

    def __init__(self, label_name: str = None, label_value: str = None):
        self.label_name = label_name
        self.label_value = label_value

    @classmethod
    def to_proto(cls, resource):
        """Convert *resource* to its proto message; falsy input maps to None."""
        if not resource:
            return None
        res = (
            endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels()
        )
        name = Primitive.to_proto(resource.label_name)
        if name:
            res.label_name = name
        value = Primitive.to_proto(resource.label_value)
        if value:
            res.label_value = value
        return res

    @classmethod
    def from_proto(cls, resource):
        """Build a wrapper from a proto message; falsy input maps to None."""
        if not resource:
            return None
        return EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels(
            label_name=Primitive.from_proto(resource.label_name),
            label_value=Primitive.from_proto(resource.label_value),
        )
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray(
    object
):
    """Converter for repeated MetadataLabels fields."""

    @classmethod
    def to_proto(self, resources):
        # Preserve falsy inputs (None / empty list) unchanged.
        if not resources:
            return resources
        return list(
            map(
                EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels.to_proto,
                resources,
            )
        )

    @classmethod
    def from_proto(self, resources):
        return list(
            map(
                EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels.from_proto,
                resources,
            )
        )
class EndpointConfigSelectorTrafficPortSelector(object):
    """Wrapper for a traffic port selector: a list of port strings."""

    def __init__(self, ports: list = None):
        self.ports = ports

    @classmethod
    def to_proto(self, resource):
        """Serialize a wrapper instance to its protobuf message (None -> None)."""
        if not resource:
            return None
        msg = (
            endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorTrafficPortSelector()
        )
        ports_value = Primitive.to_proto(resource.ports)
        if ports_value:
            msg.ports.extend(ports_value)
        return msg

    @classmethod
    def from_proto(self, resource):
        """Deserialize a protobuf message back into a wrapper (None -> None)."""
        if not resource:
            return None
        return EndpointConfigSelectorTrafficPortSelector(
            ports=Primitive.from_proto(resource.ports),
        )
class EndpointConfigSelectorTrafficPortSelectorArray(object):
    """Converter for repeated TrafficPortSelector fields."""

    @classmethod
    def to_proto(self, resources):
        # Falsy input (None or empty list) is returned as-is.
        if not resources:
            return resources
        return list(
            map(EndpointConfigSelectorTrafficPortSelector.to_proto, resources)
        )

    @classmethod
    def from_proto(self, resources):
        return list(
            map(EndpointConfigSelectorTrafficPortSelector.from_proto, resources)
        )
class EndpointConfigSelectorTypeEnum(object):
    """Converter between the client's string enum values and the proto enum."""

    # Proto enum value names are the client strings prefixed with this marker.
    _PREFIX = "NetworkservicesBetaEndpointConfigSelectorTypeEnum"

    @classmethod
    def to_proto(self, resource):
        if not resource:
            return resource
        return endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorTypeEnum.Value(
            self._PREFIX + resource
        )

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        name = endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorTypeEnum.Name(
            resource
        )
        return name[len(self._PREFIX):]
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum(
    object
):
    """Converter for the MetadataLabelMatchCriteria enum."""

    # Proto enum value names are the client strings prefixed with this marker.
    _PREFIX = "NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum"

    @classmethod
    def to_proto(self, resource):
        if not resource:
            return resource
        return endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.Value(
            self._PREFIX + resource
        )

    @classmethod
    def from_proto(self, resource):
        if not resource:
            return resource
        name = endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.Name(
            resource
        )
        return name[len(self._PREFIX):]
class Primitive(object):
    """Converter for primitive (scalar) fields.

    Proto3 scalars use "" as their default/unset value, so any falsy value is
    normalized to the empty string on serialization; deserialization is the
    identity.
    """

    @classmethod
    def to_proto(self, s):
        # Normalize falsy values (None, "", 0, ...) to proto's default "".
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        return s
|
apache-2.0
| 6,402,314,300,991,686,000
| 36.201739
| 157
| 0.663457
| false
| 4.509064
| true
| false
| false
|
romana/networking-romana
|
networking_romana/_i18n.py
|
1
|
1404
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
# oslo.i18n translation domain for this project; message catalogs are looked
# up under this name.
DOMAIN = "networking_romana"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# The contextual translation function using the name "_C"
# requires oslo.i18n >=2.1.0
_C = _translators.contextual_form
# The plural translation function using the name "_P"
# requires oslo.i18n >=2.1.0
_P = _translators.plural_form
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def get_available_languages():
    """Return the languages with available translations for this domain."""
    available = oslo_i18n.get_available_languages(DOMAIN)
    return available
|
apache-2.0
| 6,573,460,745,959,770,000
| 31.651163
| 78
| 0.730057
| false
| 3.518797
| false
| false
| false
|
caronc/nzbget-subliminal
|
Subliminal/pkg_resources.py
|
1
|
87985
|
"""Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
import sys, os, zipimport, time, re, imp, types
try:
frozenset
except NameError:
from sets import ImmutableSet as frozenset
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
# This marker is used to simplify the process that checks is the
# setuptools package was installed by the Setuptools project
# or by the Distribute project, in case Setuptools creates
# a distribution with the same version.
#
# The bootstrapping script for instance, will check if this
# attribute is present to decide wether to reinstall the package
_distribute = True
def _bypass_ensure_directory(name, mode=0777):
    # Sandbox-bypassing version of ensure_directory()
    #
    # Recursively creates the missing *parent* directories of `name` (the
    # final path component itself is not created) using the raw `mkdir`
    # captured at import time, so setuptools' sandbox cannot intercept it.
    # `mode` is a Python 2 octal literal (0o777) applied to new directories.
    if not WRITE_SUPPORT:
        raise IOError('"os.mkdir" not supported on this platform.')
    dirname, filename = split(name)
    if dirname and filename and not isdir(dirname):
        _bypass_ensure_directory(dirname)
        mkdir(dirname, mode)
def get_supported_platform():
    """Return this platform's maximum compatible version.

    distutils' get_platform() reports the *minimum* Mac OS X version required
    to use an extension, but compatibility checks need the version actually
    running; so on darwin the version component is replaced with the live OS
    version obtained from _macosx_vers().
    """
    plat = get_build_platform()
    match = macosVersionString.match(plat)
    if match is None or sys.platform != "darwin":
        return plat
    try:
        # Substitute the running OS version, keeping the machine suffix.
        return 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), match.group(3))
    except ValueError:
        # Not actually Mac OS X; keep the original platform string.
        return plat
# Public API of this module, grouped by purpose below.
__all__ = [
    # Basic resource access and distribution/entry point discovery
    'require', 'run_script', 'get_provider', 'get_distribution',
    'load_entry_point', 'get_entry_map', 'get_entry_info', 'iter_entry_points',
    'resource_string', 'resource_stream', 'resource_filename',
    'resource_listdir', 'resource_exists', 'resource_isdir',
    # Environmental control
    'declare_namespace', 'working_set', 'add_activation_listener',
    'find_distributions', 'set_extraction_path', 'cleanup_resources',
    'get_default_cache',
    # Primary implementation classes
    'Environment', 'WorkingSet', 'ResourceManager',
    'Distribution', 'Requirement', 'EntryPoint',
    # Exceptions
    'ResolutionError','VersionConflict','DistributionNotFound','UnknownExtra',
    'ExtractionError',
    # Parsing functions and string utilities
    'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
    'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
    'safe_extra', 'to_filename',
    # filesystem utilities
    'ensure_directory', 'normalize_path',
    # Distribution "precedence" constants
    'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
    # "Provider" interfaces, implementations, and registration/lookup APIs
    'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
    'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
    'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
    'register_finder', 'register_namespace_handler', 'register_loader_type',
    'fixup_namespace_packages', 'get_importer',
    # Deprecated/backward compatibility only
    'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
    """Abstract base for dependency resolution errors"""

    def __repr__(self):
        # Render as e.g. ResolutionError('why',): class name plus args tuple.
        return '%s%r' % (self.__class__.__name__, self.args)
class VersionConflict(ResolutionError):
    """An already-installed version conflicts with the requested version.

    Raised when the active distribution for a project does not satisfy the
    ``Requirement`` being processed (e.g. by ``WorkingSet.find()``/``resolve()``).
    """
class DistributionNotFound(ResolutionError):
    """A requested distribution was not found.

    Raised during resolution when no installed or obtainable distribution
    satisfies a ``Requirement``.
    """
class UnknownExtra(ResolutionError):
    """Distribution doesn't have an "extra feature" of the given name"""
    # Raised when a Requirement asks for an extra the distribution
    # does not declare.
_provider_factories = {}  # loader type -> provider factory; see register_loader_type()
PY_MAJOR = sys.version[:3]  # e.g. '2.7'; used to match eggs to the running Python
# Distribution "precedence" codes; larger values are preferred when several
# distributions of the same project/version are available.
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
    """Register `provider_factory` to make providers for `loader_type`

    `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
    and `provider_factory` is a function that, passed a *module* object,
    returns an ``IResourceProvider`` for that module.

    Registered factories are consulted by ``get_provider()`` when adapting a
    module's loader.
    """
    _provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
    """Return an IResourceProvider for the named module or requirement"""
    if isinstance(moduleOrReq, Requirement):
        # For a Requirement, return the active distribution, activating it
        # via require() if it isn't active yet.
        return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
    # Otherwise treat the argument as a module name: import on demand, then
    # adapt the module's PEP 302 loader to a provider.
    if moduleOrReq not in sys.modules:
        __import__(moduleOrReq)
    module = sys.modules[moduleOrReq]
    loader = getattr(module, '__loader__', None)
    return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
    # `_cache` is a *deliberately* mutable default argument: it memoizes the
    # result across calls (populated on first use, reused thereafter).
    if not _cache:
        import platform
        version = platform.mac_ver()[0]
        # fallback for MacPorts
        if version == '':
            import plistlib
            plist = '/System/Library/CoreServices/SystemVersion.plist'
            if os.path.exists(plist):
                plist_content = plistlib.readPlist(plist)
                if 'ProductVersion' in plist_content:
                    version = plist_content['ProductVersion']
        _cache.append(version.split('.'))
    # Returns the version as a list of string components, e.g. ['10', '6', '8'].
    return _cache[0]
def _macosx_arch(machine):
return {'PowerPC':'ppc', 'Power_Macintosh':'ppc'}.get(machine,machine)
def get_build_platform():
    """Return this platform's string for platform-specific distributions

    XXX Currently this is the same as ``distutils.util.get_platform()``, but it
    needs some hacks for Linux and Mac OS X.
    """
    from distutils.util import get_platform
    plat = get_platform()
    if sys.platform == "darwin" and not plat.startswith('macosx-'):
        # Rewrite legacy 'darwin-...' style values to the
        # 'macosx-<major>.<minor>-<machine>' form used by eggs.
        try:
            version = _macosx_vers()
            machine = os.uname()[4].replace(" ", "_")
            return "macosx-%d.%d-%s" % (int(version[0]), int(version[1]),
                _macosx_arch(machine))
        except ValueError:
            # if someone is running a non-Mac darwin system, this will fall
            # through to the default implementation
            pass
    return plat
# Patterns for egg platform tags: the modern 'macosx-<maj>.<min>-<machine>'
# form, and the legacy 'darwin-<kernel version>-<machine>' form (see
# compatible_platforms() for how the legacy form is handled).
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
get_platform = get_build_platform # XXX backward compat
def compatible_platforms(provided,required):
    """Can code for the `provided` platform run on the `required` platform?
    Returns true if either platform is ``None``, or the platforms are equal.
    XXX Needs compatibility checks for Linux and other unixy OSes.
    """
    if provided is None or required is None or provided==required:
        return True # easy case
    # Mac OS X special cases
    reqMac = macosVersionString.match(required)
    if reqMac:
        provMac = macosVersionString.match(provided)
        # is this a Mac package?
        if not provMac:
            # this is backwards compatibility for packages built before
            # setuptools 0.6. All packages built after this point will
            # use the new macosx designation.
            provDarwin = darwinVersionString.match(provided)
            if provDarwin:
                # Legacy darwin kernel versions map to OS X releases:
                # kernel 7 -> 10.3, kernel 8 -> 10.4.
                dversion = int(provDarwin.group(1))
                macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
                if dversion == 7 and macosversion >= "10.3" or \
                    dversion == 8 and macosversion >= "10.4":
                    #import warnings
                    #warnings.warn("Mac eggs should be rebuilt to "
                    #    "use the macosx designation instead of darwin.",
                    #    category=DeprecationWarning)
                    return True
            return False # egg isn't macosx or legacy darwin
        # are they the same major version and machine type?
        if provMac.group(1) != reqMac.group(1) or \
            provMac.group(3) != reqMac.group(3):
            return False
        # is the required OS major update >= the provided one?
        if int(provMac.group(2)) > int(reqMac.group(2)):
            return False
        return True
    # XXX Linux and other platforms' special cases should go here
    return False
def run_script(dist_spec, script_name):
    """Locate distribution `dist_spec` and run its `script_name` script"""
    # The *caller's* global namespace is cleared (keeping only __name__) and
    # reused, so the script executes as if it were the calling module.
    ns = sys._getframe(1).f_globals
    name = ns['__name__']
    ns.clear()
    ns['__name__'] = name
    require(dist_spec)[0].run_script(script_name, ns)
run_main = run_script # backward compatibility
def get_distribution(dist):
    """Return a current distribution object for a Requirement or string"""
    # Progressive coercion: string -> Requirement -> Distribution.
    # NOTE: `basestring` makes this Python 2-only code.
    if isinstance(dist,basestring): dist = Requirement.parse(dist)
    if isinstance(dist,Requirement): dist = get_provider(dist)
    if not isinstance(dist,Distribution):
        raise TypeError("Expected string, Requirement, or Distribution", dist)
    return dist
def load_entry_point(dist, group, name):
    """Return `name` entry point of `group` for `dist` or raise ImportError

    `dist` may be a Distribution, Requirement, or project-name string (it is
    coerced via ``get_distribution()``).
    """
    return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
    """Return the entry point map for `group`, or the full entry map

    `dist` may be a Distribution, Requirement, or project-name string (it is
    coerced via ``get_distribution()``).
    """
    return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``

    `dist` may be a Distribution, Requirement, or project-name string (it is
    coerced via ``get_distribution()``).
    """
    return get_distribution(dist).get_entry_info(group, name)
class IMetadataProvider:
    # Documentation-only interface: the stubs below intentionally have no
    # `self` parameter and no bodies -- they specify the methods a concrete
    # metadata provider must implement.
    def has_metadata(name):
        """Does the package's distribution contain the named metadata?"""
    def get_metadata(name):
        """The named metadata resource as a string"""
    def get_metadata_lines(name):
        """Yield named metadata resource as list of non-blank non-comment lines
        Leading and trailing whitespace is stripped from each line, and lines
        with ``#`` as the first non-blank character are omitted."""
    def metadata_isdir(name):
        """Is the named metadata a directory? (like ``os.path.isdir()``)"""
    def metadata_listdir(name):
        """List of metadata names in the directory (like ``os.listdir()``)"""
    def run_script(script_name, namespace):
        """Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
    """An object that provides access to package resources"""
    # Documentation-only interface, like IMetadataProvider: stubs have no
    # `self` and no bodies.
    def get_resource_filename(manager, resource_name):
        """Return a true filesystem path for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_stream(manager, resource_name):
        """Return a readable file-like object for `resource_name`
        `manager` must be an ``IResourceManager``"""
    def get_resource_string(manager, resource_name):
        """Return a string containing the contents of `resource_name`
        `manager` must be an ``IResourceManager``"""
    def has_resource(resource_name):
        """Does the package contain the named resource?"""
    def resource_isdir(resource_name):
        """Is the named resource a directory? (like ``os.path.isdir()``)"""
    def resource_listdir(resource_name):
        """List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet(object):
    """A collection of active distributions on sys.path (or a similar list)"""
    # NOTE: this is Python 2-era code (`except X,v`, side-effecting `map`).
    def __init__(self, entries=None):
        """Create working set from list of path entries (default=sys.path)"""
        # entries: path items in insertion order.
        # entry_keys: path entry -> list of distribution keys found there.
        # by_key: distribution key -> the single *active* Distribution.
        # callbacks: callables notified whenever a distribution is added.
        self.entries = []
        self.entry_keys = {}
        self.by_key = {}
        self.callbacks = []
        if entries is None:
            entries = sys.path
        for entry in entries:
            self.add_entry(entry)
    def add_entry(self, entry):
        """Add a path item to ``.entries``, finding any distributions on it
        ``find_distributions(entry,True)`` is used to find distributions
        corresponding to the path entry, and they are added. `entry` is
        always appended to ``.entries``, even if it is already present.
        (This is because ``sys.path`` can contain the same value more than
        once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
        equal ``sys.path``.)
        """
        self.entry_keys.setdefault(entry, [])
        self.entries.append(entry)
        for dist in find_distributions(entry, True):
            self.add(dist, entry, False)
    def __contains__(self,dist):
        """True if `dist` is the active distribution for its project"""
        return self.by_key.get(dist.key) == dist
    def find(self, req):
        """Find a distribution matching requirement `req`
        If there is an active distribution for the requested project, this
        returns it as long as it meets the version requirement specified by
        `req`. But, if there is an active distribution for the project and it
        does *not* meet the `req` requirement, ``VersionConflict`` is raised.
        If there is no active distribution for the requested project, ``None``
        is returned.
        """
        dist = self.by_key.get(req.key)
        if dist is not None and dist not in req:
            raise VersionConflict(dist,req) # XXX add more info
        else:
            return dist
    def iter_entry_points(self, group, name=None):
        """Yield entry point objects from `group` matching `name`
        If `name` is None, yields all entry points in `group` from all
        distributions in the working set, otherwise only ones matching
        both `group` and `name` are yielded (in distribution order).
        """
        for dist in self:
            entries = dist.get_entry_map(group)
            if name is None:
                for ep in entries.values():
                    yield ep
            elif name in entries:
                yield entries[name]
    def run_script(self, requires, script_name):
        """Locate distribution for `requires` and run `script_name` script"""
        # Reuses the *caller's* globals as the script namespace (see the
        # module-level run_script()).
        ns = sys._getframe(1).f_globals
        name = ns['__name__']
        ns.clear()
        ns['__name__'] = name
        self.require(requires)[0].run_script(script_name, ns)
    def __iter__(self):
        """Yield distributions for non-duplicate projects in the working set
        The yield order is the order in which the items' path entries were
        added to the working set.
        """
        seen = {}
        for item in self.entries:
            for key in self.entry_keys[item]:
                if key not in seen:
                    seen[key]=1
                    yield self.by_key[key]
    def add(self, dist, entry=None, insert=True):
        """Add `dist` to working set, associated with `entry`
        If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
        On exit from this routine, `entry` is added to the end of the working
        set's ``.entries`` (if it wasn't already present).
        `dist` is only added to the working set if it's for a project that
        doesn't already have a distribution in the set. If it's added, any
        callbacks registered with the ``subscribe()`` method will be called.
        """
        if insert:
            dist.insert_on(self.entries, entry)
        if entry is None:
            entry = dist.location
        keys = self.entry_keys.setdefault(entry,[])
        keys2 = self.entry_keys.setdefault(dist.location,[])
        if dist.key in self.by_key:
            return # ignore hidden distros
        self.by_key[dist.key] = dist
        if dist.key not in keys:
            keys.append(dist.key)
        if dist.key not in keys2:
            keys2.append(dist.key)
        # Notify subscribers that a new distribution became active.
        self._added_new(dist)
    def resolve(self, requirements, env=None, installer=None, replacement=True):
        """List all distributions needed to (recursively) meet `requirements`
        `requirements` must be a sequence of ``Requirement`` objects. `env`,
        if supplied, should be an ``Environment`` instance. If
        not supplied, it defaults to all distributions available within any
        entry or distribution in the working set. `installer`, if supplied,
        will be invoked with each requirement that cannot be met by an
        already-installed distribution; it should return a ``Distribution`` or
        ``None``.
        """
        requirements = list(requirements)[::-1] # set up the stack
        processed = {} # set of processed requirements
        best = {} # key -> dist
        to_activate = []
        while requirements:
            req = requirements.pop(0) # process dependencies breadth-first
            # Distribute-era hook: substitute 'distribute' for setuptools.
            if _override_setuptools(req) and replacement:
                req = Requirement.parse('distribute')
            if req in processed:
                # Ignore cyclic or redundant dependencies
                continue
            dist = best.get(req.key)
            if dist is None:
                # Find the best distribution and add it to the map
                dist = self.by_key.get(req.key)
                if dist is None:
                    if env is None:
                        env = Environment(self.entries)
                    dist = best[req.key] = env.best_match(req, self, installer)
                    if dist is None:
                        #msg = ("The '%s' distribution was not found on this "
                        #    "system, and is required by this application.")
                        #raise DistributionNotFound(msg % req)
                        # unfortunately, zc.buildout uses a str(err)
                        # to get the name of the distribution here..
                        raise DistributionNotFound(req)
                to_activate.append(dist)
            if dist not in req:
                # Oops, the "best" so far conflicts with a dependency
                raise VersionConflict(dist,req) # XXX put more info here
            requirements.extend(dist.requires(req.extras)[::-1])
            processed[req] = True
        return to_activate # return list of distros to activate
    def find_plugins(self,
        plugin_env, full_env=None, installer=None, fallback=True
    ):
        """Find all activatable distributions in `plugin_env`
        Example usage::
            distributions, errors = working_set.find_plugins(
                Environment(plugin_dirlist)
            )
            map(working_set.add, distributions) # add plugins+libs to sys.path
            print 'Could not load', errors # display errors
        The `plugin_env` should be an ``Environment`` instance that contains
        only distributions that are in the project's "plugin directory" or
        directories. The `full_env`, if supplied, should be an ``Environment``
        contains all currently-available distributions. If `full_env` is not
        supplied, one is created automatically from the ``WorkingSet`` this
        method is called on, which will typically mean that every directory on
        ``sys.path`` will be scanned for distributions.
        `installer` is a standard installer callback as used by the
        ``resolve()`` method. The `fallback` flag indicates whether we should
        attempt to resolve older versions of a plugin if the newest version
        cannot be resolved.
        This method returns a 2-tuple: (`distributions`, `error_info`), where
        `distributions` is a list of the distributions found in `plugin_env`
        that were loadable, along with any other distributions that are needed
        to resolve their dependencies. `error_info` is a dictionary mapping
        unloadable plugin distributions to an exception instance describing the
        error that occurred. Usually this will be a ``DistributionNotFound`` or
        ``VersionConflict`` instance.
        """
        plugin_projects = list(plugin_env)
        plugin_projects.sort() # scan project names in alphabetic order
        error_info = {}
        distributions = {}
        if full_env is None:
            env = Environment(self.entries)
            env += plugin_env
        else:
            env = full_env + plugin_env
        # Resolve into a throwaway copy so failures don't pollute `self`.
        shadow_set = self.__class__([])
        map(shadow_set.add, self) # put all our entries in shadow_set
        for project_name in plugin_projects:
            for dist in plugin_env[project_name]:
                req = [dist.as_requirement()]
                try:
                    resolvees = shadow_set.resolve(req, env, installer)
                except ResolutionError,v:
                    error_info[dist] = v # save error info
                    if fallback:
                        continue # try the next older version of project
                    else:
                        break # give up on this project, keep going
                else:
                    map(shadow_set.add, resolvees)
                    distributions.update(dict.fromkeys(resolvees))
                    # success, no need to try any more versions of this project
                    break
        distributions = list(distributions)
        distributions.sort()
        return distributions, error_info
    def require(self, *requirements):
        """Ensure that distributions matching `requirements` are activated
        `requirements` must be a string or a (possibly-nested) sequence
        thereof, specifying the distributions and versions required. The
        return value is a sequence of the distributions that needed to be
        activated to fulfill the requirements; all relevant distributions are
        included, even if they were already activated in this working set.
        """
        needed = self.resolve(parse_requirements(requirements))
        for dist in needed:
            self.add(dist)
        return needed
    def subscribe(self, callback):
        """Invoke `callback` for all distributions (including existing ones)"""
        if callback in self.callbacks:
            return
        self.callbacks.append(callback)
        for dist in self:
            callback(dist)
    def _added_new(self, dist):
        # Fan a newly-activated distribution out to all subscribers.
        for callback in self.callbacks:
            callback(dist)
class Environment(object):
    """Searchable snapshot of distributions on a search path"""
    def __init__(self, search_path=None, platform=get_supported_platform(), python=PY_MAJOR):
        """Snapshot distributions available on a search path
        Any distributions found on `search_path` are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used.
        `platform` is an optional string specifying the name of the platform
        that platform-specific distributions must be compatible with. If
        unspecified, it defaults to the current platform. `python` is an
        optional string naming the desired version of Python (e.g. ``'2.4'``);
        it defaults to the current version.
        You may explicitly set `platform` (and/or `python`) to ``None`` if you
        wish to map *all* distributions, not just those compatible with the
        running platform or Python version.
        """
        # NOTE: the `platform` default is evaluated once, at class-definition
        # time, not on each call.
        # _distmap: project key -> list of Distributions.
        # _cache: project key -> the same list, sorted (see __getitem__).
        self._distmap = {}
        self._cache = {}
        self.platform = platform
        self.python = python
        self.scan(search_path)
    def can_add(self, dist):
        """Is distribution `dist` acceptable for this environment?
        The distribution must match the platform and python version
        requirements specified when this environment was created, or False
        is returned.
        """
        return (self.python is None or dist.py_version is None
            or dist.py_version==self.python) \
            and compatible_platforms(dist.platform,self.platform)
    def remove(self, dist):
        """Remove `dist` from the environment"""
        self._distmap[dist.key].remove(dist)
    def scan(self, search_path=None):
        """Scan `search_path` for distributions usable in this environment
        Any distributions found are added to the environment.
        `search_path` should be a sequence of ``sys.path`` items. If not
        supplied, ``sys.path`` is used. Only distributions conforming to
        the platform/python version defined at initialization are added.
        """
        if search_path is None:
            search_path = sys.path
        for item in search_path:
            for dist in find_distributions(item):
                self.add(dist)
    def __getitem__(self,project_name):
        """Return a newest-to-oldest list of distributions for `project_name`
        """
        try:
            return self._cache[project_name]
        except KeyError:
            # Cache miss: normalize the name and sort the dists on demand.
            project_name = project_name.lower()
            if project_name not in self._distmap:
                return []
        if project_name not in self._cache:
            dists = self._cache[project_name] = self._distmap[project_name]
            _sort_dists(dists)
        return self._cache[project_name]
    def add(self,dist):
        """Add `dist` if we ``can_add()`` it and it isn't already added"""
        if self.can_add(dist) and dist.has_version():
            dists = self._distmap.setdefault(dist.key,[])
            if dist not in dists:
                dists.append(dist)
                if dist.key in self._cache:
                    # Keep the cached list sorted newest-to-oldest.
                    _sort_dists(self._cache[dist.key])
    def best_match(self, req, working_set, installer=None):
        """Find distribution best matching `req` and usable on `working_set`
        This calls the ``find(req)`` method of the `working_set` to see if a
        suitable distribution is already active. (This may raise
        ``VersionConflict`` if an unsuitable version of the project is already
        active in the specified `working_set`.) If a suitable distribution
        isn't active, this method returns the newest distribution in the
        environment that meets the ``Requirement`` in `req`. If no suitable
        distribution is found, and `installer` is supplied, then the result of
        calling the environment's ``obtain(req, installer)`` method will be
        returned.
        """
        dist = working_set.find(req)
        if dist is not None:
            return dist
        for dist in self[req.key]:
            if dist in req:
                return dist
        return self.obtain(req, installer) # try and download/install
    def obtain(self, requirement, installer=None):
        """Obtain a distribution matching `requirement` (e.g. via download)
        Obtain a distro that matches requirement (e.g. via download). In the
        base ``Environment`` class, this routine just returns
        ``installer(requirement)``, unless `installer` is None, in which case
        None is returned instead. This method is a hook that allows subclasses
        to attempt other ways of obtaining a distribution before falling back
        to the `installer` argument."""
        if installer is not None:
            return installer(requirement)
    def __iter__(self):
        """Yield the unique project names of the available distributions"""
        for key in self._distmap.keys():
            if self[key]: yield key
    def __iadd__(self, other):
        """In-place addition of a distribution or environment"""
        if isinstance(other,Distribution):
            self.add(other)
        elif isinstance(other,Environment):
            for project in other:
                for dist in other[project]:
                    self.add(dist)
        else:
            raise TypeError("Can't add %r to environment" % (other,))
        return self
    def __add__(self, other):
        """Add an environment or distribution to an environment"""
        # The new environment is unconstrained (platform/python None) so it
        # can hold everything from both operands.
        new = self.__class__([], platform=None, python=None)
        for env in self, other:
            new += env
        return new
AvailableDistributions = Environment # XXX backward compatibility
class ExtractionError(RuntimeError):
    """An error occurred extracting a resource
    The following attributes are available from instances of this exception:
    manager
        The resource manager that raised this exception
    cache_path
        The base directory for resource extraction
    original_error
        The exception instance that caused extraction to fail
    """
    # The attributes above are set by ResourceManager.extraction_error()
    # before the exception is raised.
class ResourceManager:
    """Manage resource extraction and packages"""
    # Base directory for extraction; None means "use get_default_cache()".
    # Configured via set_extraction_path() before any extraction occurs.
    extraction_path = None
    def __init__(self):
        # target_path -> 1 for every path handed out by get_cache_path(),
        # tracked for possible cleanup later.
        self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
    def extraction_error(self):
        """Give an error message for problems extracting file(s)"""
        # Wraps the in-flight exception in an ExtractionError carrying the
        # manager, the cache path, and the original error as attributes.
        old_exc = sys.exc_info()[1]
        cache_path = self.extraction_path or get_default_cache()
        err = ExtractionError("""Can't extract file(s) to egg cache
The following error occurred while trying to extract file(s) to the Python egg
cache:
%s
The Python egg cache directory is currently set to:
%s
Perhaps your account does not have write access to this directory? You can
change the cache directory by setting the PYTHON_EGG_CACHE environment
variable to point to an accessible directory.
""" % (old_exc, cache_path)
        )
        err.manager = self
        err.cache_path = cache_path
        err.original_error = old_exc
        raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name+'-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except:
self.extraction_error()
self.cached_files[target_path] = 1
return target_path
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0555) & 07777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
    """Determine the default cache location

    This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
    Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
    "Application Data" directory.  On all other systems, it's "~/.python-eggs".

    Raises RuntimeError on Windows when none of the candidate environment
    variable combinations yields a usable home directory.
    """
    try:
        return os.environ['PYTHON_EGG_CACHE']
    except KeyError:
        pass
    if os.name != 'nt':
        return os.path.expanduser('~/.python-eggs')
    app_data = 'Application Data'   # XXX this may be locale-specific!
    # Candidate (env-var combination, subdirectory) pairs, best first.
    app_homes = [
        (('APPDATA',), None),           # best option, should be locale-safe
        (('USERPROFILE',), app_data),
        (('HOMEDRIVE','HOMEPATH'), app_data),
        (('HOMEPATH',), app_data),
        (('HOME',), None),
        (('WINDIR',), app_data),        # 95/98/ME
    ]
    for keys, subdir in app_homes:
        dirname = ''
        for key in keys:
            if key in os.environ:
                dirname = os.path.join(dirname, os.environ[key])
            else:
                # one of the required variables is missing; try next candidate
                break
        else:
            if subdir:
                dirname = os.path.join(dirname, subdir)
            return os.path.join(dirname, 'Python-Eggs')
    else:
        # Fixed typo in user-facing message: "enviroment" -> "environment"
        raise RuntimeError(
            "Please set the PYTHON_EGG_CACHE environment variable"
        )
def safe_name(name):
    """Normalize *name* to a standard distribution name.

    Every maximal run of characters outside [A-Za-z0-9.] collapses into a
    single dash.
    """
    illegal = re.compile('[^A-Za-z0-9.]+')
    return illegal.sub('-', name)
def safe_version(version):
    """Normalize *version* to a standard version string.

    Spaces become dots; any remaining run of characters outside
    [A-Za-z0-9.] becomes a single dash.
    """
    dotted = version.replace(' ', '.')
    return re.compile('[^A-Za-z0-9.]+').sub('-', dotted)
def safe_extra(extra):
    """Normalize *extra* to a standard 'extra' name.

    Runs of characters outside [A-Za-z0-9.] become single underscores,
    and the result is always lowercased.
    """
    underscored = re.compile('[^A-Za-z0-9.]+').sub('_', extra)
    return underscored.lower()
def to_filename(name):
    """Escape a project or version name for use in a filename.

    Every '-' character is replaced with '_'.
    """
    return '_'.join(name.split('-'))
class NullProvider:
    """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
    # Filled in by subclasses that understand eggs:
    egg_name = None
    egg_info = None
    loader = None
    def __init__(self, module):
        self.loader = getattr(module, '__loader__', None)
        self.module_path = os.path.dirname(getattr(module, '__file__', ''))
    def get_resource_filename(self, manager, resource_name):
        # Resolve `resource_name` relative to the module's directory.
        return self._fn(self.module_path, resource_name)
    def get_resource_stream(self, manager, resource_name):
        # Wrap the raw resource contents in an in-memory file object.
        return StringIO(self.get_resource_string(manager, resource_name))
    def get_resource_string(self, manager, resource_name):
        return self._get(self._fn(self.module_path, resource_name))
    def has_resource(self, resource_name):
        return self._has(self._fn(self.module_path, resource_name))
    def has_metadata(self, name):
        return self.egg_info and self._has(self._fn(self.egg_info,name))
    # (3, 0, ...) compares greater than (3,), so this branch selects the
    # Python 2 implementation; the else branch is the Python 3 one.
    if sys.version_info <= (3,):
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            return self._get(self._fn(self.egg_info,name))
    else:
        def get_metadata(self, name):
            if not self.egg_info:
                return ""
            # metadata arrives as bytes from _get(); decode for text use
            return self._get(self._fn(self.egg_info,name)).decode("utf-8")
    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
    def resource_isdir(self,resource_name):
        return self._isdir(self._fn(self.module_path, resource_name))
    def metadata_isdir(self,name):
        return self.egg_info and self._isdir(self._fn(self.egg_info,name))
    def resource_listdir(self,resource_name):
        return self._listdir(self._fn(self.module_path,resource_name))
    def metadata_listdir(self,name):
        if self.egg_info:
            return self._listdir(self._fn(self.egg_info,name))
        return []
    def run_script(self,script_name,namespace):
        """Execute the metadata script `scripts/<script_name>` in `namespace`."""
        script = 'scripts/'+script_name
        if not self.has_metadata(script):
            raise ResolutionError("No script named %r" % script_name)
        # normalize all line endings to '\n'
        script_text = self.get_metadata(script).replace('\r\n','\n')
        script_text = script_text.replace('\r','\n')
        script_filename = self._fn(self.egg_info,script)
        namespace['__file__'] = script_filename
        if os.path.exists(script_filename):
            # real file on disk: run it directly so tracebacks point at it
            execfile(script_filename, namespace, namespace)
        else:
            # zipped script: prime linecache so tracebacks can show source
            from linecache import cache
            cache[script_filename] = (
                len(script_text), 0, script_text.split('\n'), script_filename
            )
            script_code = compile(script_text,script_filename,'exec')
            exec script_code in namespace, namespace
    # The _has/_isdir/_listdir primitives must come from a concrete provider
    # registered for the loader type:
    def _has(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _isdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _listdir(self, path):
        raise NotImplementedError(
            "Can't perform this operation for unregistered loader type"
        )
    def _fn(self, base, resource_name):
        # Map a '/'-separated resource name onto the host filesystem.
        if resource_name:
            return os.path.join(base, *resource_name.split('/'))
        return base
    def _get(self, path):
        if hasattr(self.loader, 'get_data'):
            return self.loader.get_data(path)
        raise NotImplementedError(
            "Can't perform this operation for loaders without 'get_data()'"
        )
# Fallback provider for any loader type without a more specific registration.
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
    """Provider based on a virtual filesystem"""

    def __init__(self, module):
        NullProvider.__init__(self, module)
        self._setup_prefix()

    def _setup_prefix(self):
        # Our metadata may be nested inside a "basket" of multiple eggs, so
        # search upward from module_path (rather than .archive) until a
        # *.egg component is found or the path stops shrinking.
        path = self.module_path
        previous = None
        while path != previous:
            if path.lower().endswith('.egg'):
                self.egg_name = os.path.basename(path)
                self.egg_info = os.path.join(path, 'EGG-INFO')
                self.egg_root = path
                break
            previous = path
            path, _ = os.path.split(path)
class DefaultProvider(EggProvider):
    """Provides access to package resources in the filesystem"""
    def _has(self, path):
        return os.path.exists(path)
    def _isdir(self,path):
        return os.path.isdir(path)
    def _listdir(self,path):
        return os.listdir(path)
    def get_resource_stream(self, manager, resource_name):
        # Stream straight from the filesystem; no extraction needed.
        return open(self._fn(self.module_path, resource_name), 'rb')
    def _get(self, path):
        # Read the whole file in binary mode, always closing the handle.
        stream = open(path, 'rb')
        try:
            return stream.read()
        finally:
            stream.close()
# Plain filesystem modules have no __loader__, i.e. a loader of type(None).
register_loader_type(type(None), DefaultProvider)
class EmptyProvider(NullProvider):
    """Provider that returns nothing for all requests"""
    module_path = None

    def __init__(self):
        # No module is wrapped, so NullProvider's initialization is skipped.
        pass

    def _has(self, path):
        return False

    def _isdir(self, path):
        return False

    def _get(self, path):
        return ''

    def _listdir(self, path):
        return []

# Shared singleton used whenever a distribution has no real metadata.
empty_provider = EmptyProvider()
class ZipProvider(EggProvider):
    """Resource support for zips and eggs"""
    # Cached list of "eager" resource names (from metadata), or None if the
    # metadata has not been read yet.
    eagers = None
    def __init__(self, module):
        EggProvider.__init__(self,module)
        # zipimport's directory cache for this archive: subpath -> stat tuple
        self.zipinfo = zipimport._zip_directory_cache[self.loader.archive]
        self.zip_pre = self.loader.archive+os.sep
    def _zipinfo_name(self, fspath):
        # Convert a virtual filename (full path to file) into a zipfile subpath
        # usable with the zipimport directory cache for our target archive
        if fspath.startswith(self.zip_pre):
            return fspath[len(self.zip_pre):]
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.zip_pre)
        )
    def _parts(self,zip_path):
        # Convert a zipfile subpath into an egg-relative path part list
        fspath = self.zip_pre+zip_path # pseudo-fs path
        if fspath.startswith(self.egg_root+os.sep):
            return fspath[len(self.egg_root)+1:].split(os.sep)
        raise AssertionError(
            "%s is not a subpath of %s" % (fspath,self.egg_root)
        )
    def get_resource_filename(self, manager, resource_name):
        """Extract the resource (and any eager siblings) and return its path."""
        if not self.egg_name:
            raise NotImplementedError(
                "resource_filename() only supported for .egg, not .zip"
            )
        # no need to lock for extraction, since we use temp names
        zip_path = self._resource_to_zip(resource_name)
        eagers = self._get_eager_resources()
        if '/'.join(self._parts(zip_path)) in eagers:
            # eager resources (e.g. native libs) must all be extracted together
            for name in eagers:
                self._extract_resource(manager, self._eager_to_zip(name))
        return self._extract_resource(manager, zip_path)
    def _extract_resource(self, manager, zip_path):
        # Extract one file — or, if zip_path is a directory in the index,
        # recursively extract its contents — into the manager's cache.
        if zip_path in self._index():
            for name in self._index()[zip_path]:
                last = self._extract_resource(
                    manager, os.path.join(zip_path, name)
                )
            return os.path.dirname(last) # return the extracted directory name
        zip_stat = self.zipinfo[zip_path]
        # zipimport stat tuple fields: [3]=size, [5]=DOS time, [6]=DOS date
        t,d,size = zip_stat[5], zip_stat[6], zip_stat[3]
        # decode the packed DOS date/time into a time.mktime() 9-tuple
        date_time = (
            (d>>9)+1980, (d>>5)&0xF, d&0x1F, # ymd
            (t&0xFFFF)>>11, (t>>5)&0x3F, (t&0x1F) * 2, 0, 0, -1 # hms, etc.
        )
        timestamp = time.mktime(date_time)
        try:
            if not WRITE_SUPPORT:
                raise IOError('"os.rename" and "os.unlink" are not supported '
                    'on this platform')
            real_path = manager.get_cache_path(
                self.egg_name, self._parts(zip_path)
            )
            if os.path.isfile(real_path):
                stat = os.stat(real_path)
                if stat.st_size==size and stat.st_mtime==timestamp:
                    # size and stamp match, don't bother extracting
                    return real_path
            # write to a temp name, then rename into place (atomic-ish)
            outf, tmpnam = _mkstemp(".$extract", dir=os.path.dirname(real_path))
            os.write(outf, self.loader.get_data(zip_path))
            os.close(outf)
            utime(tmpnam, (timestamp,timestamp))
            manager.postprocess(tmpnam, real_path)
            try:
                rename(tmpnam, real_path)
            except os.error:
                if os.path.isfile(real_path):
                    stat = os.stat(real_path)
                    if stat.st_size==size and stat.st_mtime==timestamp:
                        # size and stamp match, somebody did it just ahead of
                        # us, so we're done
                        return real_path
                    elif os.name=='nt': # Windows, del old file and retry
                        unlink(real_path)
                        rename(tmpnam, real_path)
                        return real_path
                raise
        except os.error:
            manager.extraction_error() # report a user-friendly error
        return real_path
    def _get_eager_resources(self):
        # Lazily read and cache the eager-resource lists from metadata.
        if self.eagers is None:
            eagers = []
            for name in ('native_libs.txt', 'eager_resources.txt'):
                if self.has_metadata(name):
                    eagers.extend(self.get_metadata_lines(name))
            self.eagers = eagers
        return self.eagers
    def _index(self):
        # Build (once) a directory index: dir subpath -> list of child names.
        try:
            return self._dirindex
        except AttributeError:
            ind = {}
            for path in self.zipinfo:
                parts = path.split(os.sep)
                while parts:
                    parent = os.sep.join(parts[:-1])
                    if parent in ind:
                        # all ancestors of `parent` are already indexed
                        ind[parent].append(parts[-1])
                        break
                    else:
                        ind[parent] = [parts.pop()]
            self._dirindex = ind
            return ind
    def _has(self, fspath):
        zip_path = self._zipinfo_name(fspath)
        return zip_path in self.zipinfo or zip_path in self._index()
    def _isdir(self,fspath):
        return self._zipinfo_name(fspath) in self._index()
    def _listdir(self,fspath):
        return list(self._index().get(self._zipinfo_name(fspath), ()))
    def _eager_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.egg_root,resource_name))
    def _resource_to_zip(self,resource_name):
        return self._zipinfo_name(self._fn(self.module_path,resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
    """Metadata handler for standalone PKG-INFO files

    Usage::

        metadata = FileMetadata("/path/to/PKG-INFO")

    Every data/metadata request is rejected except for PKG-INFO, which is
    reported as existing and served from the contents of the file at the
    supplied location.
    """

    def __init__(self, path):
        self.path = path

    def has_metadata(self, name):
        # PKG-INFO is the only metadata this provider knows about
        return name == 'PKG-INFO'

    def get_metadata(self, name):
        if name != 'PKG-INFO':
            raise KeyError("No metadata except PKG-INFO is available")
        f = open(self.path, 'rU')
        metadata = f.read()
        f.close()
        return metadata

    def get_metadata_lines(self, name):
        return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
    """Metadata provider for egg directories

    Usage::

        # Development eggs:
        egg_info = "/path/to/PackageName.egg-info"
        base_dir = os.path.dirname(egg_info)
        metadata = PathMetadata(base_dir, egg_info)
        dist_name = os.path.splitext(os.path.basename(egg_info))[0]
        dist = Distribution(basedir,project_name=dist_name,metadata=metadata)

        # Unpacked egg directories:
        egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
        metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
        dist = Distribution.from_filename(egg_path, metadata=metadata)
    """

    def __init__(self, path, egg_info):
        # `path` is the sys.path entry; `egg_info` is the metadata directory.
        self.egg_info = egg_info
        self.module_path = path
class EggMetadata(ZipProvider):
    """Metadata provider for .egg files"""

    def __init__(self, importer):
        """Create a metadata provider from a zipimporter"""
        # Wire the zipimporter in directly instead of going through
        # ZipProvider.__init__ (which expects a module, not an importer).
        archive = importer.archive
        self.zipinfo = zipimport._zip_directory_cache[archive]
        self.zip_pre = archive + os.sep
        self.loader = importer
        if importer.prefix:
            self.module_path = os.path.join(archive, importer.prefix)
        else:
            self.module_path = archive
        self._setup_prefix()
class ImpWrapper:
    """PEP 302 Importer that wraps Python's "normal" import algorithm"""

    def __init__(self, path=None):
        # `path` is None for the top-level importer, else one sys.path entry
        self.path = path

    def find_module(self, fullname, path=None):
        subname = fullname.split(".")[-1]
        if self.path is None:
            if subname != fullname:
                # a dotted submodule was requested from the path-less
                # importer; the package's own __path__ must handle it
                return None
            search = None
        else:
            search = [self.path]
        try:
            stream, pathname, description = imp.find_module(subname, search)
        except ImportError:
            return None
        return ImpLoader(stream, pathname, description)
class ImpLoader:
    """PEP 302 Loader that wraps Python's "normal" import algorithm"""

    def __init__(self, file, filename, etc):
        self.file = file
        self.filename = filename
        self.etc = etc

    def load_module(self, fullname):
        try:
            mod = imp.load_module(fullname, self.file, self.filename, self.etc)
        finally:
            # close the stream even when the import itself fails
            if self.file:
                self.file.close()
        # __loader__ is deliberately left unset so the module looks as if it
        # came through the standard import machinery.
        return mod
def get_importer(path_item):
    """Retrieve a PEP 302 "importer" for the given path item

    If there is no importer, this returns a wrapper around the builtin
    import machinery.  The returned importer is only cached if it was
    created by a path hook.
    """
    try:
        importer = sys.path_importer_cache[path_item]
    except KeyError:
        importer = None
        for hook in sys.path_hooks:
            try:
                importer = hook(path_item)
            except ImportError:
                continue
            else:
                break
        # cache the hook-created importer (or the None marker)
        sys.path_importer_cache.setdefault(path_item, importer)
    if importer is None:
        # no hook claimed the item: wrap the builtin import machinery
        try:
            importer = ImpWrapper(path_item)
        except ImportError:
            pass
    return importer
# Prefer the stdlib implementations when available (Python 2.5+); the
# hand-rolled ImpWrapper/ImpLoader above are only used on 2.3/2.4.
try:
    from pkgutil import get_importer, ImpImporter
except ImportError:
    pass # Python 2.3 or 2.4, use our own implementation
else:
    ImpWrapper = ImpImporter # Python 2.5, use pkgutil's implementation
    del ImpLoader, ImpImporter
# Maps importer type -> distribution-finder callable (see register_finder).
_distribution_finders = {}
def register_finder(importer_type, distribution_finder):
    """Register `distribution_finder` to find distributions in sys.path items
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `distribution_finder` is a callable that, passed a path
    item and the importer instance, yields ``Distribution`` instances found on
    that path item.  See ``pkg_resources.find_on_path`` for an example."""
    _distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
    """Yield the distributions reachable through `path_item`.

    Looks up the PEP 302 importer for the path item, selects the finder
    registered for that importer's type, and returns its results.
    """
    importer = get_importer(path_item)
    return _find_adapter(_distribution_finders, importer)(
        importer, path_item, only
    )
def find_in_zip(importer, path_item, only=False):
    """Yield the distribution in a zipped egg, plus any nested eggs."""
    metadata = EggMetadata(importer)
    if metadata.has_metadata('PKG-INFO'):
        yield Distribution.from_filename(path_item, metadata=metadata)
    if only:
        return # don't yield nested distros
    for subitem in metadata.resource_listdir('/'):
        if not subitem.endswith('.egg'):
            continue
        subpath = os.path.join(path_item, subitem)
        for dist in find_in_zip(zipimport.zipimporter(subpath), subpath):
            yield dist

register_finder(zipimport.zipimporter, find_in_zip)
def StringIO(*args, **kw):
    """Thunk to load the real StringIO on demand"""
    # Rebinds the module-level name on first call, so subsequent calls go
    # straight to the real class (cStringIO when available, else StringIO).
    global StringIO
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    return StringIO(*args,**kw)
def find_nothing(importer, path_item, only=False):
    """Distribution finder for loader types that never contain distributions."""
    return tuple()
# Default finder: unknown importer types yield no distributions.
register_finder(object,find_nothing)
def find_on_path(importer, path_item, only=False):
    """Yield distributions accessible on a sys.path directory"""
    path_item = _normalize_cached(path_item)
    if os.path.isdir(path_item) and os.access(path_item, os.R_OK):
        if path_item.lower().endswith('.egg'):
            # unpacked egg
            yield Distribution.from_filename(
                path_item, metadata=PathMetadata(
                    path_item, os.path.join(path_item,'EGG-INFO')
                )
            )
        else:
            # scan for .egg and .egg-info in directory
            for entry in os.listdir(path_item):
                lower = entry.lower()
                if lower.endswith('.egg-info'):
                    fullpath = os.path.join(path_item, entry)
                    if os.path.isdir(fullpath):
                        # egg-info directory, allow getting metadata
                        metadata = PathMetadata(path_item, fullpath)
                    else:
                        # single PKG-INFO-style metadata file
                        metadata = FileMetadata(fullpath)
                    yield Distribution.from_location(
                        path_item,entry,metadata,precedence=DEVELOP_DIST
                    )
                elif not only and lower.endswith('.egg'):
                    # nested egg (file or dir): delegate to the right finder
                    for dist in find_distributions(os.path.join(path_item, entry)):
                        yield dist
                elif not only and lower.endswith('.egg-link'):
                    # *.egg-link: its first non-blank line names a dev egg
                    for line in open(os.path.join(path_item, entry)):
                        if not line.strip(): continue
                        for item in find_distributions(os.path.join(path_item,line.rstrip())):
                            yield item
                        break
register_finder(ImpWrapper,find_on_path)
# Maps importer type -> namespace handler callable (see below).
_namespace_handlers = {}
# Maps package name -> list of declared child namespace package names.
_namespace_packages = {}
def register_namespace_handler(importer_type, namespace_handler):
    """Register `namespace_handler` to declare namespace packages
    `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
    handler), and `namespace_handler` is a callable like this::
        def namespace_handler(importer,path_entry,moduleName,module):
            # return a path_entry to use for child packages
    Namespace handlers are only called if the importer object has already
    agreed that it can handle the relevant path item, and they should only
    return a subpath if the module __path__ does not already contain an
    equivalent subpath.  For an example namespace handler, see
    ``pkg_resources.file_ns_handler``.
    """
    _namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
    """Ensure that named package includes a subpath of path_item (if needed)"""
    importer = get_importer(path_item)
    if importer is None:
        # unsupported path item
        return None
    loader = importer.find_module(packageName)
    if loader is None:
        # this path item does not contain the package
        return None
    module = sys.modules.get(packageName)
    if module is None:
        # create an empty namespace-package module and hook it to its parent
        module = sys.modules[packageName] = types.ModuleType(packageName)
        module.__path__ = []; _set_parent_ns(packageName)
    elif not hasattr(module,'__path__'):
        raise TypeError("Not a package:", packageName)
    handler = _find_adapter(_namespace_handlers, importer)
    subpath = handler(importer,path_item,packageName,module)
    if subpath is not None:
        # append the new subpath, load, then re-assert the augmented
        # __path__ in case loading replaced it
        path = module.__path__; path.append(subpath)
        loader.load_module(packageName); module.__path__ = path
    return subpath
def declare_namespace(packageName):
    """Declare that package 'packageName' is a namespace package"""
    # The interpreter import lock guards _namespace_packages and sys.modules.
    imp.acquire_lock()
    try:
        if packageName in _namespace_packages:
            # already declared; nothing to do
            return
        path, parent = sys.path, None
        if '.' in packageName:
            # recursively declare (and import) the parent namespace first,
            # then search the parent's __path__ rather than sys.path
            parent = '.'.join(packageName.split('.')[:-1])
            declare_namespace(parent)
            __import__(parent)
            try:
                path = sys.modules[parent].__path__
            except AttributeError:
                raise TypeError("Not a package:", parent)
        # Track what packages are namespaces, so when new path items are added,
        # they can be updated
        _namespace_packages.setdefault(parent,[]).append(packageName)
        _namespace_packages.setdefault(packageName,[])
        for path_item in path:
            # Ensure all the parent's path items are reflected in the child,
            # if they apply
            _handle_ns(packageName, path_item)
    finally:
        imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item"""
    imp.acquire_lock()
    try:
        # Give every namespace package under `parent` a chance to pick up a
        # subpath of `path_item`, recursing into any that actually did.
        for package in _namespace_packages.get(parent,()):
            subpath = _handle_ns(package, path_item)
            if subpath: fixup_namespace_packages(subpath,package)
    finally:
        imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
    """Compute an ns-package subpath for a filesystem or zipfile importer"""
    subpath = os.path.join(path_item, packageName.split('.')[-1])
    normalized = _normalize_cached(subpath)
    for existing in module.__path__:
        if _normalize_cached(existing) == normalized:
            # an equivalent entry is already on __path__; contribute nothing
            return None
    return subpath

register_namespace_handler(ImpWrapper, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
    """Namespace handler that never contributes a subpath."""
    return None
# Default handler: unknown importer types contribute no namespace subpaths.
register_namespace_handler(object,null_ns_handler)
def normalize_path(filename):
    """Normalize a file/dir name for comparison purposes"""
    real = os.path.realpath(filename)
    return os.path.normcase(real)
def _normalize_cached(filename,_cache={}):
    """Memoized normalize_path().

    The shared mutable default dict is an intentional cross-call memo table.
    """
    if filename not in _cache:
        _cache[filename] = normalize_path(filename)
    return _cache[filename]
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
    """Yield non-empty/non-comment lines of a ``basestring`` or sequence"""
    if isinstance(strs, basestring):
        for raw in strs.splitlines():
            line = raw.strip()
            # skip blank lines and comments
            if line and not line.startswith('#'):
                yield line
    else:
        # a (possibly nested) sequence of strings: flatten recursively
        for element in strs:
            for line in yield_lines(element):
                yield line
# Requirement-parsing tokens (each bound directly to .match):
LINE_END = re.compile(r"\s*(#.*)?$").match # whitespace and comment
CONTINUE = re.compile(r"\s*\\\s*(#.*)?$").match # line continuation
DISTRO = re.compile(r"\s*((\w|[-.])+)").match # Distribution or extra
VERSION = re.compile(r"\s*(<=?|>=?|==|!=)\s*((\w|[-.])+)").match # ver. info
COMMA = re.compile(r"\s*,").match # comma between items
OBRACKET = re.compile(r"\s*\[").match # opening '[' of an extras list
CBRACKET = re.compile(r"\s*\]").match # closing ']' of an extras list
MODULE = re.compile(r"\w+(\.\w+)*$").match # dotted module/group name
# Egg filename structure: name[-version[-pyX.Y[-platform]]]
EGG_NAME = re.compile(
    r"(?P<name>[^-]+)"
    r"( -(?P<ver>[^-]+) (-py(?P<pyver>[^-]+) (-(?P<plat>.+))? )? )?",
    re.VERBOSE | re.IGNORECASE
).match
# Version-string tokenizer and alias map used by _parse_version_parts():
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = {'pre':'c', 'preview':'c','-':'final-','rc':'c','dev':'@'}.get
def _parse_version_parts(s):
    """Yield normalized, sortable components of version string `s`."""
    # Tokenize with the module-level component_re; `replace` maps the
    # aliases 'pre'/'preview'/'rc' -> 'c', '-' -> 'final-', 'dev' -> '@'.
    for token in component_re.split(s):
        token = replace(token, token)
        if not token or token == '.':
            continue
        if token[:1] in '0123456789':
            # zero-pad numbers so they compare correctly as strings
            yield token.zfill(8)
        else:
            yield '*' + token
    yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
    """Convert a version string to a chronologically-sortable key
    This is a rough cross between distutils' StrictVersion and LooseVersion;
    if you give it versions that would work with StrictVersion, then it behaves
    the same; otherwise it acts like a slightly-smarter LooseVersion. It is
    *possible* to create pathological version coding schemes that will fool
    this parser, but they should be very rare in practice.
    The returned value will be a tuple of strings.  Numeric portions of the
    version are padded to 8 digits so they will compare numerically, but
    without relying on how numbers compare relative to strings.  Dots are
    dropped, but dashes are retained.  Trailing zeros between alpha segments
    or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
    "2.4". Alphanumeric parts are lower-cased.
    The algorithm assumes that strings like "-" and any alpha string that
    alphabetically follows "final"  represents a "patch level".  So, "2.4-1"
    is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
    considered newer than "2.4-1", which in turn is newer than "2.4".
    Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
    come before "final" alphabetically) are assumed to be pre-release versions,
    so that the version "2.4" is considered newer than "2.4a1".
    Finally, to handle miscellaneous cases, the strings "pre", "preview", and
    "rc" are treated as if they were "c", i.e. as though they were release
    candidates, and therefore are not as new as a version string that does not
    contain them, and "dev" is replaced with an '@' so that it sorts lower than
    than any other pre-release tag.
    """
    parts = []
    for part in _parse_version_parts(s.lower()):
        if part.startswith('*'):
            # an alpha tag or the '*final' marker: tidy preceding parts
            if part<'*final':   # remove '-' before a prerelease tag
                while parts and parts[-1]=='*final-': parts.pop()
            # remove trailing zeros from each series of numeric parts
            while parts and parts[-1]=='00000000':
                parts.pop()
        parts.append(part)
    return tuple(parts)
class EntryPoint(object):
    """Object representing an advertised importable object"""
    def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
        if not MODULE(module_name):
            raise ValueError("Invalid module name", module_name)
        self.name = name
        self.module_name = module_name
        self.attrs = tuple(attrs)
        # Normalize/validate extras by round-tripping through a dummy
        # requirement string.
        self.extras = Requirement.parse(("x[%s]" % ','.join(extras))).extras
        self.dist = dist
    def __str__(self):
        # Reproduce the 'name = module:attrs [extras]' source form.
        s = "%s = %s" % (self.name, self.module_name)
        if self.attrs:
            s += ':' + '.'.join(self.attrs)
        if self.extras:
            s += ' [%s]' % ','.join(self.extras)
        return s
    def __repr__(self):
        return "EntryPoint.parse(%r)" % str(self)
    def load(self, require=True, env=None, installer=None):
        """Import and return the advertised object (requiring extras first)."""
        if require: self.require(env, installer)
        entry = __import__(self.module_name, globals(),globals(), ['__name__'])
        for attr in self.attrs:
            try:
                entry = getattr(entry,attr)
            except AttributeError:
                raise ImportError("%r has no %r attribute" % (entry,attr))
        return entry
    def require(self, env=None, installer=None):
        """Resolve this entry point's distribution requirements into the
        working set."""
        if self.extras and not self.dist:
            raise UnknownExtra("Can't require() without a distribution", self)
        # NOTE: relies on Python 2's eager map() to add each resolved
        # distribution to the working set.
        map(working_set.add,
            working_set.resolve(self.dist.requires(self.extras),env,installer))
    #@classmethod
    def parse(cls, src, dist=None):
        """Parse a single entry point from string `src`
        Entry point syntax follows the form::
            name = some.module:some.attr [extra1,extra2]
        The entry name and module name are required, but the ``:attrs`` and
        ``[extras]`` parts are optional
        """
        try:
            attrs = extras = ()
            name,value = src.split('=',1)
            if '[' in value:
                # split off and validate the '[extras]' suffix
                value,extras = value.split('[',1)
                req = Requirement.parse("x["+extras)
                if req.specs: raise ValueError
                extras = req.extras
            if ':' in value:
                # split off and validate the ':attrs' suffix
                value,attrs = value.split(':',1)
                if not MODULE(attrs.rstrip()):
                    raise ValueError
                attrs = attrs.rstrip().split('.')
        except ValueError:
            raise ValueError(
                "EntryPoint must be in 'name=module:attrs [extras]' format",
                src
            )
        else:
            return cls(name.strip(), value.strip(), attrs, extras, dist)
    parse = classmethod(parse)
    #@classmethod
    def parse_group(cls, group, lines, dist=None):
        """Parse an entry point group"""
        if not MODULE(group):
            raise ValueError("Invalid group name", group)
        this = {}
        for line in yield_lines(lines):
            ep = cls.parse(line, dist)
            if ep.name in this:
                raise ValueError("Duplicate entry point", group, ep.name)
            this[ep.name]=ep
        return this
    parse_group = classmethod(parse_group)
    #@classmethod
    def parse_map(cls, data, dist=None):
        """Parse a map of entry point groups"""
        if isinstance(data,dict):
            data = data.items()
        else:
            data = split_sections(data)
        maps = {}
        for group, lines in data:
            if group is None:
                # content before any [section] header is only legal if empty
                if not lines:
                    continue
                raise ValueError("Entry points must be listed in groups")
            group = group.strip()
            if group in maps:
                raise ValueError("Duplicate group name", group)
            maps[group] = cls.parse_group(group, lines, dist)
        return maps
    parse_map = classmethod(parse_map)
class Distribution(object):
"""Wrap an actual or potential sys.path entry w/metadata"""
def __init__(self,
location=None, metadata=None, project_name=None, version=None,
py_version=PY_MAJOR, platform=None, precedence = EGG_DIST
):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
#@classmethod
def from_location(cls,location,basename,metadata=None,**kw):
project_name, version, py_version, platform = [None]*4
basename, ext = os.path.splitext(basename)
if ext.lower() in (".egg",".egg-info"):
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name','ver','pyver','plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)
from_location = classmethod(from_location)
hashcmp = property(
lambda self: (
getattr(self,'parsed_version',()), self.precedence, self.key,
-len(self.location or ''), self.location, self.py_version,
self.platform
)
)
def __hash__(self): return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
#@property
def key(self):
    """Lowercased project name; computed once and cached in self._key."""
    try:
        return self._key
    except AttributeError:
        self._key = key = self.project_name.lower()
        return key
key = property(key)
#@property
def parsed_version(self):
    """Comparable parsed form of self.version; cached on first access."""
    try:
        return self._parsed_version
    except AttributeError:
        self._parsed_version = pv = parse_version(self.version)
        return pv
parsed_version = property(parsed_version)
#@property
def version(self):
    """Version string; read lazily from the PKG-INFO 'Version:' header if
    it was not passed to the constructor. Raises ValueError if absent."""
    try:
        return self._version
    except AttributeError:
        for line in self._get_metadata('PKG-INFO'):
            if line.lower().startswith('version:'):
                self._version = safe_version(line.split(':',1)[1].strip())
                return self._version
        # for/else: only reached when no 'Version:' line returned above.
        else:
            raise ValueError(
                "Missing 'Version:' header and/or PKG-INFO file", self
            )
version = property(version)
#@property
def _dep_map(self):
    """Lazily built dict: extra name (or None) -> list of Requirements.

    The None key collects the unconditional dependencies; sections of
    requires.txt/depends.txt keyed by "[extra]" headers fill the rest.
    """
    try:
        return self.__dep_map
    except AttributeError:
        dm = self.__dep_map = {None: []}
        for name in 'requires.txt', 'depends.txt':
            for extra,reqs in split_sections(self._get_metadata(name)):
                if extra: extra = safe_extra(extra)
                dm.setdefault(extra,[]).extend(parse_requirements(reqs))
        return dm
_dep_map = property(_dep_map)
def requires(self,extras=()):
    """List of Requirements needed for this distro if `extras` are used.

    Always includes the unconditional dependencies (the None key of
    _dep_map); raises UnknownExtra for an extra the distro never declared.
    """
    dm = self._dep_map
    deps = []
    deps.extend(dm.get(None,()))
    for ext in extras:
        try:
            deps.extend(dm[safe_extra(ext)])
        except KeyError:
            raise UnknownExtra(
                "%s has no such extra feature %r" % (self, ext)
            )
    return deps
def _get_metadata(self,name):
    # Yield the lines of metadata file `name`, or nothing if it is absent.
    if self.has_metadata(name):
        for line in self.get_metadata_lines(name):
            yield line
def activate(self,path=None):
    """Ensure distribution is importable on `path` (default=sys.path)"""
    if path is None: path = sys.path
    self.insert_on(path)
    if path is sys.path:
        fixup_namespace_packages(self.location)
        # NOTE: relies on Python 2's eager map() for its side effect; a
        # lazy Python 3 map() would silently skip declare_namespace calls.
        map(declare_namespace, self._get_metadata('namespace_packages.txt'))
def egg_name(self):
    """Return what this distribution's standard .egg filename should be"""
    filename = "%s-%s-py%s" % (
        to_filename(self.project_name), to_filename(self.version),
        self.py_version or PY_MAJOR
    )
    if self.platform:
        filename += '-'+self.platform
    return filename
def __repr__(self):
    # "<name> <version> (<location>)" when the location is known.
    if self.location:
        return "%s (%s)" % (self,self.location)
    else:
        return str(self)
def __str__(self):
    # The version property may raise ValueError when PKG-INFO is missing;
    # fall back to a placeholder rather than failing in str().
    try: version = getattr(self,'version',None)
    except ValueError: version = None
    version = version or "[unknown version]"
    return "%s %s" % (self.project_name,version)
def __getattr__(self,attr):
    """Delegate all unrecognized public attributes to .metadata provider"""
    if attr.startswith('_'):
        raise AttributeError,attr
    return getattr(self._provider, attr)
#@classmethod
def from_filename(cls,filename,metadata=None, **kw):
    # Convenience wrapper around from_location(): normalizes the path and
    # splits off the basename for egg-name parsing.
    return cls.from_location(
        _normalize_cached(filename), os.path.basename(filename), metadata,
        **kw
    )
from_filename = classmethod(from_filename)
def as_requirement(self):
    """Return a ``Requirement`` that matches this distribution exactly"""
    return Requirement.parse('%s==%s' % (self.project_name, self.version))
def load_entry_point(self, group, name):
    """Return the `name` entry point of `group` or raise ImportError"""
    ep = self.get_entry_info(group,name)
    if ep is None:
        raise ImportError("Entry point %r not found" % ((group,name),))
    return ep.load()
def get_entry_map(self, group=None):
    """Return the entry point map for `group`, or the full entry map"""
    try:
        ep_map = self._ep_map
    except AttributeError:
        # Parse entry_points.txt once and cache the result.
        ep_map = self._ep_map = EntryPoint.parse_map(
            self._get_metadata('entry_points.txt'), self
        )
    if group is not None:
        return ep_map.get(group,{})
    return ep_map
def get_entry_info(self, group, name):
    """Return the EntryPoint object for `group`+`name`, or ``None``"""
    return self.get_entry_map(group).get(name)
def insert_on(self, path, loc = None):
    """Insert self.location in path before its nearest parent directory"""
    loc = loc or self.location
    if self.project_name == 'setuptools':
        # NOTE(review): `version` is computed but never used below; this
        # branch appears to be dead code (it only swallows ValueError).
        try:
            version = self.version
        except ValueError:
            version = ''
    if not loc:
        return
    if path is sys.path:
        self.check_version_conflict()
    nloc = _normalize_cached(loc)
    bdir = os.path.dirname(nloc)
    # Python 2 map() returns a list here; .index()/.insert() below rely on it.
    npath= map(_normalize_cached, path)
    bp = None
    for p, item in enumerate(npath):
        if item==nloc:
            break
        elif item==bdir and self.precedence==EGG_DIST:
            # if it's an .egg, give it precedence over its directory
            path.insert(p, loc)
            npath.insert(p, nloc)
            break
    else:
        # Not present and no parent dir found: append at the end.
        path.append(loc)
        return
    # p is the spot where we found or inserted loc; now remove duplicates
    while 1:
        try:
            np = npath.index(nloc, p+1)
        except ValueError:
            break
        else:
            del npath[np], path[np]
            p = np # ha!
    return
def check_version_conflict(self):
    """Warn when a top-level module of this distribution was already
    imported from a location other than this distribution's own."""
    if self.key=='distribute':
        return # ignore the inevitable setuptools self-conflicts :(
    nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
    loc = normalize_path(self.location)
    for modname in self._get_metadata('top_level.txt'):
        # Skip modules not yet imported, namespace packages, and the
        # bootstrap modules that are always loaded early.
        if (modname not in sys.modules or modname in nsp
            or modname in _namespace_packages
        ):
            continue
        if modname in ('pkg_resources', 'setuptools', 'site'):
            continue
        fn = getattr(sys.modules[modname], '__file__', None)
        if fn and normalize_path(fn).startswith(loc):
            continue
        issue_warning(
            "Module %s was already imported from %s, but %s is being added"
            " to sys.path" % (modname, fn, self.location),
        )
def has_version(self):
    """True if a version is determinable; warn and return False otherwise
    (e.g. an unbuilt egg with no PKG-INFO)."""
    try:
        self.version
    except ValueError:
        issue_warning("Unbuilt egg for "+repr(self))
        return False
    return True
def clone(self,**kw):
    """Copy this distribution, substituting in any changed keyword args"""
    for attr in (
        'project_name', 'version', 'py_version', 'platform', 'location',
        'precedence'
    ):
        kw.setdefault(attr, getattr(self,attr,None))
    kw.setdefault('metadata', self._provider)
    return self.__class__(**kw)
#@property
def extras(self):
    # Names of declared optional feature sets; the None key of _dep_map
    # (unconditional dependencies) is filtered out by the truth test.
    return [dep for dep in self._dep_map if dep]
extras = property(extras)
def issue_warning(*args, **kw):
    """Emit a warning attributed to the first caller outside this module.

    Walks up the stack until a frame whose globals differ from this
    module's is found, then forwards `args`/`kw` to warnings.warn() with
    a matching `stacklevel`.
    """
    g = globals()
    depth = 1
    try:
        # find the first stack frame that is *not* code in
        # the pkg_resources module, to use for the warning
        while sys._getframe(depth).f_globals is g:
            depth += 1
    except ValueError:
        # Walked off the top of the stack; use the deepest level reached.
        pass
    from warnings import warn
    warn(stacklevel=depth + 1, *args, **kw)
def parse_requirements(strs):
    """Yield ``Requirement`` objects for each specification in `strs`
    `strs` must be an instance of ``basestring``, or a (possibly-nested)
    iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    lines = iter(yield_lines(strs))
    def scan_list(ITEM,TERMINATOR,line,p,groups,item_name):
        # Scan a comma-separated list of ITEM matches starting at line[p],
        # following backslash continuations onto subsequent lines. Returns
        # the (possibly advanced) line, the new position, and the items.
        items = []
        while not TERMINATOR(line,p):
            if CONTINUE(line,p):
                try:
                    line = lines.next(); p = 0
                except StopIteration:
                    raise ValueError(
                        "\\ must not appear on the last nonblank line"
                    )
            match = ITEM(line,p)
            if not match:
                raise ValueError("Expected "+item_name+" in",line,"at",line[p:])
            items.append(match.group(*groups))
            p = match.end()
            match = COMMA(line,p)
            if match:
                p = match.end() # skip the comma
            elif not TERMINATOR(line,p):
                raise ValueError(
                    "Expected ',' or end-of-list in",line,"at",line[p:]
                )
        match = TERMINATOR(line,p)
        if match: p = match.end() # skip the terminator, if any
        return line, p, items
    for line in lines:
        match = DISTRO(line)
        if not match:
            raise ValueError("Missing distribution spec", line)
        project_name = match.group(1)
        p = match.end()
        extras = []
        # Optional "[extra1,extra2]" list right after the project name.
        match = OBRACKET(line,p)
        if match:
            p = match.end()
            line, p, extras = scan_list(
                DISTRO, CBRACKET, line, p, (1,), "'extra' name"
            )
        # Then zero or more version specs like ">=1.0,<2.0" up to line end.
        line, p, specs = scan_list(VERSION,LINE_END,line,p,(1,2),"version spec")
        specs = [(op,safe_version(val)) for op,val in specs]
        yield Requirement(project_name, specs, extras)
def _sort_dists(dists):
tmp = [(dist.hashcmp,dist) for dist in dists]
tmp.sort()
dists[::-1] = [d for hc,d in tmp]
class Requirement:
    """A parsed requirement like "name[extra1,extra2]>=1.0,<2.0".

    Construct via Requirement.parse(); supports membership tests with a
    Distribution or a version string ("dist in req" / "ver in req").
    """
    def __init__(self, project_name, specs, extras):
        """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
        self.unsafe_name, project_name = project_name, safe_name(project_name)
        self.project_name, self.key = project_name, project_name.lower()
        # index: sorted (parsed_version, transition_row, op, raw_version);
        # the transition rows come from the module-level state_machine.
        index = [(parse_version(v),state_machine[op],op,v) for op,v in specs]
        index.sort()
        self.specs = [(op,ver) for parsed,trans,op,ver in index]
        self.index, self.extras = index, tuple(map(safe_extra,extras))
        # hashCmp is the canonical identity: key, parsed specs, extras.
        self.hashCmp = (
            self.key, tuple([(op,parsed) for parsed,trans,op,ver in index]),
            frozenset(self.extras)
        )
        self.__hash = hash(self.hashCmp)
    def __str__(self):
        specs = ','.join([''.join(s) for s in self.specs])
        extras = ','.join(self.extras)
        if extras: extras = '[%s]' % extras
        return '%s%s%s' % (self.project_name, extras, specs)
    def __eq__(self,other):
        return isinstance(other,Requirement) and self.hashCmp==other.hashCmp
    def __contains__(self,item):
        """True if `item` (Distribution or version string) satisfies the
        version constraints; project keys must match for Distributions."""
        if isinstance(item,Distribution):
            if item.key <> self.key: return False
            if self.index: item = item.parsed_version # only get if we need it
        elif isinstance(item,basestring):
            item = parse_version(item)
        last = None
        compare = lambda a, b: (a > b) - (a < b) # -1, 0, 1
        # Run the per-spec state machine: 'T'/'F' decide immediately,
        # '+'/'-' tentatively accept/reject, '.' leaves the state alone.
        for parsed,trans,op,ver in self.index:
            action = trans[compare(item,parsed)] # Indexing: 0, 1, -1
            if action=='F': return False
            elif action=='T': return True
            elif action=='+': last = True
            elif action=='-' or last is None: last = False
        if last is None: last = True # no rules encountered
        return last
    def __hash__(self):
        return self.__hash
    def __repr__(self): return "Requirement.parse(%r)" % str(self)
    #@staticmethod
    def parse(s, replacement=True):
        """Parse a single requirement string; with `replacement` enabled a
        setuptools-0.6-compatible requirement is answered by distribute."""
        reqs = list(parse_requirements(s))
        if reqs:
            if len(reqs) == 1:
                founded_req = reqs[0]
                # if asked for setuptools distribution
                # and if distribute is installed, we want to give
                # distribute instead
                if _override_setuptools(founded_req) and replacement:
                    distribute = list(parse_requirements('distribute'))
                    if len(distribute) == 1:
                        return distribute[0]
                    return founded_req
                else:
                    return founded_req
            raise ValueError("Expected only one requirement", s)
        raise ValueError("No requirements found", s)
    parse = staticmethod(parse)
# Transition table for Requirement.__contains__. For each operator the
# 3-char string gives the action when the candidate version is equal to,
# greater than, or less than the spec version (columns "=><", selected by
# indexing with cmp results 0/1/-1): T/F = accept/reject immediately,
# '+'/'-' = tentatively accept/reject, '.' = leave the state unchanged.
state_machine = {
    #       =><
    '<' : '--T',
    '<=': 'T-T',
    '>' : 'F+F',
    '>=': 'T+F',
    '==': 'T..',
    '!=': 'F++',
}
def _override_setuptools(req):
"""Return True when distribute wants to override a setuptools dependency.
We want to override when the requirement is setuptools and the version is
a variant of 0.6.
"""
if req.project_name == 'setuptools':
if not len(req.specs):
# Just setuptools: ok
return True
for comparator, version in req.specs:
if comparator in ['==', '>=', '>']:
if '0.7' in version:
# We want some setuptools not from the 0.6 series.
return False
return True
return False
def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls,type):
class cls(cls,object): pass
return cls.__mro__[1:]
return cls.__mro__
def _find_adapter(registry, ob):
    """Return an adapter factory for `ob` from `registry`.

    Walks `ob`'s method resolution order and returns the first registered
    factory; implicitly returns None when no entry matches.
    """
    klass = getattr(ob, '__class__', type(ob))
    for candidate in _get_mro(klass):
        factory = registry.get(candidate)
        if factory is not None:
            return factory
def ensure_directory(path):
    """Ensure that the parent directory of `path` exists.

    Creates any missing intermediate directories. Fix over the previous
    check-then-create version: if another process creates the directory
    between the isdir() check and makedirs(), the resulting OSError is
    swallowed instead of propagating (only genuine failures re-raise).
    """
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
        try:
            os.makedirs(dirname)
        except OSError:
            # Lost a race with a concurrent creator, or a real error:
            # re-raise only when the directory still does not exist.
            if not os.path.isdir(dirname):
                raise
def split_sections(s):
    """Split a string or iterable thereof into (section,content) pairs
    Each ``section`` is a stripped version of the section header ("[section]")
    and each ``content`` is a list of stripped lines excluding blank lines and
    comment-only lines. If there are any such lines before the first section
    header, they're returned in a first ``section`` of ``None``.
    """
    section = None
    content = []
    for line in yield_lines(s):
        if line.startswith("["):
            if line.endswith("]"):
                # Flush the previous section before starting a new one;
                # a leading None section is emitted only if it had content.
                if section or content:
                    yield section, content
                section = line[1:-1].strip()
                content = []
            else:
                raise ValueError("Invalid section heading", line)
        else:
            content.append(line)
    # wrap up last segment
    yield section, content
def _mkstemp(*args,**kw):
    """tempfile.mkstemp() wrapper that works inside the run_setup sandbox:
    the real os.open is restored for the duration of the call so the
    sandbox's file-access hooks do not reject the temp file creation."""
    from tempfile import mkstemp
    old_open = os.open
    try:
        os.open = os_open # temporarily bypass sandboxing
        return mkstemp(*args,**kw)
    finally:
        os.open = old_open # and then put it back
# Set up global resource manager
_manager = ResourceManager()
def _initialize(g):
    # Re-export every public ResourceManager method as a module-level
    # function bound to the singleton `_manager`.
    for name in dir(_manager):
        if not name.startswith('_'):
            g[name] = getattr(_manager, name)
_initialize(globals())
# Prepare the master working set and make the ``require()`` API available
working_set = WorkingSet()
try:
    # Does the main program list any requirements?
    from __main__ import __requires__
except ImportError:
    pass # No: just use the default working set based on sys.path
else:
    # Yes: ensure the requirements are met, by prefixing sys.path if necessary
    try:
        working_set.require(__requires__)
    except VersionConflict: # try it without defaults already on sys.path
        working_set = WorkingSet([]) # by starting with an empty path
        for dist in working_set.resolve(
            parse_requirements(__requires__), Environment()
        ):
            working_set.add(dist)
        for entry in sys.path: # add any missing entries from sys.path
            if entry not in working_set.entries:
                working_set.add_entry(entry)
        sys.path[:] = working_set.entries # then copy back to sys.path
# Module-level aliases for the master working set's public API.
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
run_main = run_script # backward compatibility
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
# Relies on Python 2's eager map() to re-add the entries for their side
# effects (each add_entry activates the matching distributions).
working_set.entries=[]; map(working_set.add_entry,sys.path) # match order
|
gpl-3.0
| 6,505,110,880,996,614,000
| 31.916199
| 94
| 0.602159
| false
| 4.255211
| false
| false
| false
|
ethz-asl/segmatch
|
segmappy/segmappy/models/model_groups_tf.py
|
1
|
6314
|
import tensorflow as tf
# define the cnn model
def init_model(input_shape, n_classes):
    """Build the TF1 graph for the SegMap model: a 3D-CNN encoder shared
    by a softmax classification head and a deconvolutional reconstruction
    (autoencoder) head, plus the combined loss, train op and summaries.

    input_shape -- voxel-grid dimensions as a tuple; a trailing channel
                   axis of size 1 is appended for the conv layers.
                   NOTE(review): the decoder reshapes to (8, 8, 4, 32),
                   which assumes a matching encoder output — confirm the
                   expected input_shape with the caller.
    n_classes   -- number of classifier output classes.

    All tensors carry explicit names ("input", "y_true", "loss",
    "train_op", ...) so callers can retrieve them from the graph by name.
    """
    with tf.name_scope("InputScope") as scope:
        cnn_input = tf.placeholder(
            dtype=tf.float32, shape=(None,) + input_shape + (1,), name="input"
        )

    # base convolutional layers
    y_true = tf.placeholder(dtype=tf.float32, shape=(None, n_classes), name="y_true")
    scales = tf.placeholder(dtype=tf.float32, shape=(None, 3), name="scales")

    # Defaults to inference mode; feed True to enable batchnorm/dropout
    # training behavior.
    training = tf.placeholder_with_default(
        tf.constant(False, dtype=tf.bool), shape=(), name="training"
    )

    conv1 = tf.layers.conv3d(
        inputs=cnn_input,
        filters=32,
        kernel_size=(3, 3, 3),
        padding="same",
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        name="conv1",
    )

    pool1 = tf.layers.max_pooling3d(
        inputs=conv1, pool_size=(2, 2, 2), strides=(2, 2, 2), name="pool1"
    )

    # NOTE(review): the layer names "conv3"/"conv5" below do not match the
    # Python variable names (conv2/conv3); presumably kept for checkpoint
    # compatibility — renaming would break saved models.
    conv2 = tf.layers.conv3d(
        inputs=pool1,
        filters=64,
        kernel_size=(3, 3, 3),
        padding="same",
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        name="conv3",
    )

    pool2 = tf.layers.max_pooling3d(
        inputs=conv2, pool_size=(2, 2, 2), strides=(2, 2, 2), name="pool2"
    )

    conv3 = tf.layers.conv3d(
        inputs=pool2,
        filters=64,
        kernel_size=(3, 3, 3),
        padding="same",
        activation=tf.nn.relu,
        use_bias=True,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        name="conv5",
    )

    # Flatten the conv features and append the 3 per-segment scale values.
    flatten = tf.contrib.layers.flatten(inputs=conv3)
    flatten = tf.concat([flatten, scales], axis=1, name="flatten")

    # classification network
    dense1 = tf.layers.dense(
        inputs=flatten,
        units=512,
        activation=tf.nn.relu,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        use_bias=True,
        name="dense1",
    )

    bn_dense1 = tf.layers.batch_normalization(
        dense1, training=training, name="bn_dense1"
    )

    dropout_dense1 = tf.layers.dropout(
        bn_dense1, rate=0.5, training=training, name="dropout_dense1"
    )

    # 64-dimensional segment descriptor: the embedding used downstream.
    descriptor = tf.layers.dense(
        inputs=dropout_dense1,
        units=64,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        activation=tf.nn.relu,
        use_bias=True,
        name="descriptor",
    )

    bn_descriptor = tf.layers.batch_normalization(
        descriptor, training=training, name="bn_descriptor"
    )

    # Identity adds so both descriptor variants are fetchable by name.
    with tf.name_scope("OutputScope") as scope:
        tf.add(bn_descriptor, 0, name="descriptor_bn_read")
        tf.add(descriptor, 0, name="descriptor_read")

    dropout_descriptor = tf.layers.dropout(
        bn_descriptor, rate=0.35, training=training, name="dropout_descriptor"
    )

    y_pred = tf.layers.dense(
        inputs=dropout_descriptor,
        units=n_classes,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        activation=None,
        use_bias=True,
        name="classes",
    )

    loss_c = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=y_pred, labels=y_true),
        name="loss_c",
    )

    # reconstruction network
    dec_dense1 = tf.layers.dense(
        inputs=descriptor,
        units=8192,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        activation=tf.nn.relu,
        use_bias=True,
        name="dec_dense1",
    )

    # 8192 = 8*8*4*32: unflatten back into a coarse voxel feature volume.
    reshape = tf.reshape(dec_dense1, (tf.shape(cnn_input)[0], 8, 8, 4, 32))

    dec_conv1 = tf.layers.conv3d_transpose(
        inputs=reshape,
        filters=32,
        kernel_size=(3, 3, 3),
        strides=(2, 2, 2),
        padding="same",
        use_bias=False,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        activation=tf.nn.relu,
        name="dec_conv1",
    )

    dec_conv2 = tf.layers.conv3d_transpose(
        inputs=dec_conv1,
        filters=32,
        kernel_size=(3, 3, 3),
        strides=(2, 2, 2),
        padding="same",
        use_bias=False,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        activation=tf.nn.relu,
        name="dec_conv2",
    )

    # Final sigmoid layer produces per-voxel occupancy probabilities.
    dec_reshape = tf.layers.conv3d_transpose(
        inputs=dec_conv2,
        filters=1,
        kernel_size=(3, 3, 3),
        padding="same",
        use_bias=False,
        kernel_initializer=tf.contrib.layers.xavier_initializer(),
        activation=tf.nn.sigmoid,
        name="dec_reshape",
    )

    reconstruction = dec_reshape
    with tf.name_scope("ReconstructionScopeAE") as scope:
        tf.add(reconstruction, 0, name="ae_reconstruction_read")

    # Weighted binary cross-entropy over voxels: false negatives (missing
    # occupied voxels) are penalized more than false positives.
    FN_TO_FP_WEIGHT = 0.9
    loss_r = -tf.reduce_mean(
        FN_TO_FP_WEIGHT * cnn_input * tf.log(reconstruction + 1e-10)
        + (1 - FN_TO_FP_WEIGHT) * (1 - cnn_input) * tf.log(1 - reconstruction + 1e-10)
    )
    tf.identity(loss_r, "loss_r")

    # training
    # Combined objective: classification plus heavily weighted reconstruction.
    LOSS_R_WEIGHT = 200
    LOSS_C_WEIGHT = 1
    loss = tf.add(LOSS_C_WEIGHT * loss_c, LOSS_R_WEIGHT * loss_r, name="loss")

    global_step = tf.Variable(0, trainable=False, name="global_step")
    update_step = tf.assign(
        global_step, tf.add(global_step, tf.constant(1)), name="update_step"
    )

    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)

    # add batch normalization updates to the training operation
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss, name="train_op")

    # statistics
    y_prob = tf.nn.softmax(y_pred, name="y_prob")

    correct_pred = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name="accuracy")

    # ROC AUC is computed outside the graph and fed in for epoch summaries.
    roc_auc = tf.placeholder(dtype=tf.float32, shape=(), name="roc_auc")

    with tf.name_scope("summary"):
        tf.summary.scalar("loss", loss, collections=["summary_batch"])
        tf.summary.scalar("loss_c", loss_c, collections=["summary_batch"])
        tf.summary.scalar("loss_r", loss_r, collections=["summary_batch"])
        tf.summary.scalar("accuracy", accuracy, collections=["summary_batch"])
        tf.summary.scalar("roc_auc", roc_auc, collections=["summary_epoch"])
|
bsd-3-clause
| -7,130,928,450,581,762,000
| 29.8
| 86
| 0.606589
| false
| 3.387339
| false
| false
| false
|
ivanlyon/exercises
|
kattis/k_rockscissorspaper.py
|
1
|
3696
|
'''
Deliver state of Rock-Paper-Scissors style of Conway game of life
Status: Accepted
'''
###############################################################################
class RockPaperScissorsGrid():
    '''Conway life-style grid where R, P and S glyphs compete with their
    orthogonal neighbors each day.'''

    def __init__(self, grid):
        # The grid is a list of equal-length strings, one per row.
        self._grid = grid
        self._rows = len(grid)
        self._cols = len(grid[0])

    def neighbors(self, row, col):
        "List valid neighbors for grid location"
        # Collected in fixed order: above, left, below, right.
        adjacent = []
        if row - 1 >= 0:
            adjacent.append(self._grid[row - 1][col])
        if col - 1 >= 0:
            adjacent.append(self._grid[row][col - 1])
        if row + 1 < self._rows:
            adjacent.append(self._grid[row + 1][col])
        if col + 1 < self._cols:
            adjacent.append(self._grid[row][col + 1])
        return ''.join(adjacent)

    def compete(self, days):
        "Perform all the changes resulting from competition over some days"
        # A glyph is replaced by the glyph that defeats it whenever that
        # glyph appears among its neighbors; all cells update in lockstep.
        trumped_by = {'S': 'R', 'P': 'S', 'R': 'P'}
        for _ in range(days):
            updated = []
            for row, text in enumerate(self._grid):
                cells = [
                    trumped_by[glyph]
                    if trumped_by[glyph] in self.neighbors(row, col)
                    else glyph
                    for col, glyph in enumerate(text)
                ]
                updated.append(''.join(cells))
            self._grid = updated

    def __repr__(self):
        return '\n'.join(self._grid)
###############################################################################
def main():
    """Read input and print output.

    Input format: number of test cases, then per case a line with
    "rows cols days" (cols is re-derived from the grid rows and ignored)
    followed by `rows` grid lines. Prints each final grid, with a blank
    line between consecutive cases.
    """
    for test_case in range(int(input())):
        if test_case:
            # Blank separator line between outputs of consecutive cases.
            print()
        rows, _, days = [int(i) for i in input().split()]
        grid = []
        for _ in range(rows):
            grid.append(input())
        rpsg = RockPaperScissorsGrid(grid)
        rpsg.compete(days)
        print(rpsg)
###############################################################################
def demo():
    '''RPS animation over random grid: prints the start/end states to the
    console and shows a matplotlib table animation of each day.'''
    import matplotlib.pyplot as plt
    from matplotlib import animation
    from random import choice

    # Test case data: rock-heavy random 20x20 grid simulated for 24 days.
    rows, columns = 20, 20
    matrix = []
    for _r in range(rows):
        matrix.append(''.join([choice('RRRRRPS') for _ in range(columns)]))
    days = 24

    # Console display
    rpsg = RockPaperScissorsGrid(matrix)
    print("Random input:")
    print(rpsg)
    print("\nDay", days, "Output:")
    rpsg.compete(days)
    print(rpsg)

    _fig, axes = plt.subplots()
    plt.suptitle('Rock-Paper-Scissors Demonstration (Random Grid)')
    axes.axis('off')

    # Animation state: rpsg2 is (re)created on frame 0 and advanced one
    # day per subsequent frame; colors map each glyph to a pastel cell.
    rpsg2 = None
    bg_color = {'R': '#FFBBBB', 'P': '#BBFFBB', 'S': '#BBBBFF'}

    def animate(i):
        '''Show Rock Paper Scissors grid per day'''
        nonlocal rpsg2
        if i:
            rpsg2.compete(1)
        else:
            rpsg2 = RockPaperScissorsGrid(matrix)
        table = plt.table(cellText=rpsg2._grid, loc='center', cellLoc='center')
        axes.title.set_text('Day {:d}'.format(i))
        for the_cell in table.get_children():
            the_cell.set_facecolor(bg_color[the_cell.get_text().get_text()])

    # Keep a reference so the animation is not garbage-collected early.
    _ = animation.FuncAnimation(_fig, animate, frames=days+1, interval=500, blit=False)
    plt.show()
###############################################################################
if __name__ == '__main__':
    # --demo shows the matplotlib animation; default runs the judge I/O.
    import argparse
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument("--demo", help="demonstration of rock-paper-scissors", action="store_true")
    ARGS = PARSER.parse_args()
    if ARGS.demo:
        demo()
    else:
        main()
|
mit
| 4,729,091,299,634,914,000
| 29.295082
| 99
| 0.502706
| false
| 3.767584
| false
| false
| false
|
CollabQ/CollabQ
|
invite/views.py
|
1
|
2741
|
# Copyright 2010 http://www.collabq.com
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django import http
from django import template
from django.conf import settings
from django.template import loader
from common import api
from common import display
from common import util
from common import views as common_views
def invite_email(request, code):
    """User has received the invite email, and has followed the link to
    accept or refuse it.

    `code` identifies the invite. Logged-in users may have their
    accept/reject action handled immediately; otherwise a preview of the
    inviter's recent activity is rendered.
    """
    if request.user:
        # Process an accept/reject form action; both redirect to the
        # user's overview page when handled.
        handled = common_views.handle_view_action(
            request,
            {'invite_accept': request.user.url('/overview'),
             'invite_reject': request.user.url('/overview')
             }
        )
        if handled:
            return handled

    # Retrieve the invite
    invite = api.invite_get(api.ROOT, code)
    from_actor = invite.from_actor

    # Translate the from_actor into a display name
    from_actor_ref = api.actor_get(api.ROOT, from_actor)
    view = from_actor_ref

    if not from_actor_ref:
        # Corner case: from_actor was deleted since the invite was sent.
        # In this case, do we want to consider the invitation invalid?
        # (probably we do, because it's more likely that it was spam)
        return util.RedirectError("That invite is no longer valid")

    # We use api.ROOT in the next set of functions because the
    # invite is giving possibly private access to the user
    inbox = api.inbox_get_actor_contacts(api.ROOT,
                                         view.nick,
                                         limit=5)
    entries = api.entry_get_entries(api.ROOT, inbox)
    stream_keys = [e.stream for e in entries]
    streams = api.stream_get_streams(api.ROOT, stream_keys)
    # Gather every actor referenced by the previewed entries/streams so
    # the display helpers can resolve names in one batch.
    actor_nicks = ([view.nick] +
                   [s.owner for s in streams.values() if s] +
                   [e.owner for e in entries] +
                   [e.actor for e in entries])
    actors = api.actor_get_actors(api.ROOT, actor_nicks)

    streams = display.prep_stream_dict(streams, actors)
    entries = display.prep_entry_list(entries, streams, actors)

    sidebar_green_top = True

    # locals() is intentional: the template picks up view/entries/etc.
    c = template.RequestContext(request, locals())
    t = loader.get_template('invite/templates/email.html')
    return http.HttpResponse(t.render(c))
|
apache-2.0
| -7,093,654,507,555,927,000
| 33.708861
| 79
| 0.684422
| false
| 3.910128
| false
| false
| false
|
kayhayen/Nuitka
|
nuitka/nodes/ReturnNodes.py
|
1
|
6750
|
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Return node
This one exits functions. The only other exit is the default exit of functions with 'None' value, if no return is done.
"""
from abc import abstractmethod
from .NodeBases import StatementBase, StatementChildHavingBase
class StatementReturn(StatementChildHavingBase):
    """Return statement with a non-constant expression child.

    During optimization it degrades itself into more specific forms: an
    expression-only statement when the child must raise, or a constant
    return node when the child folds to a compile-time constant.
    """
    kind = "STATEMENT_RETURN"

    named_child = "expression"
    nice_child = "return value"

    def __init__(self, expression, source_ref):
        assert expression

        StatementChildHavingBase.__init__(self, value=expression, source_ref=source_ref)

    @staticmethod
    def mayReturn():
        return True

    @staticmethod
    def isStatementAborting():
        # A return never falls through to the next statement.
        return True

    def mayRaiseException(self, exception_type):
        # Only the returned expression can raise.
        return self.subnode_expression.mayRaiseException(exception_type)

    def computeStatement(self, trace_collection):
        expression = trace_collection.onExpression(self.subnode_expression)

        if expression.mayRaiseException(BaseException):
            trace_collection.onExceptionRaiseExit(BaseException)

        if expression.willRaiseException(BaseException):
            # The return can never happen: keep only the raising expression.
            from .NodeMakingHelpers import (
                makeStatementExpressionOnlyReplacementNode,
            )

            result = makeStatementExpressionOnlyReplacementNode(
                expression=expression, node=self
            )

            return (
                result,
                "new_raise",
                """\
Return statement raises in returned expression, removed return.""",
            )

        trace_collection.onFunctionReturn()

        if expression.isExpressionConstantRef():
            # Fold into the dedicated constant-return node variants.
            result = makeStatementReturnConstant(
                constant=expression.getCompileTimeConstant(), source_ref=self.source_ref
            )

            del self.parent

            return (
                result,
                "new_statements",
                """\
Return value is constant.""",
            )

        return self, None, None
class StatementReturnConstantBase(StatementBase):
    """Base class of return statements with a compile-time constant value;
    subclasses differ only in how they store/report that constant."""

    __slots__ = ()

    def __init__(self, source_ref):
        StatementBase.__init__(self, source_ref=source_ref)

    @staticmethod
    def isStatementReturn():
        return True

    @staticmethod
    def isStatementReturnConstant():
        return True

    @staticmethod
    def isStatementAborting():
        return True

    @staticmethod
    def mayReturn():
        return True

    @staticmethod
    def mayRaiseException(exception_type):
        # Returning a constant cannot raise.
        return False

    def computeStatement(self, trace_collection):
        # Already in simplest form; just record the function exit.
        trace_collection.onFunctionReturn()

        return self, None, None

    @abstractmethod
    def getConstant(self):
        """The returned constant value."""

    @staticmethod
    def getStatementNiceName():
        return "return statement"
class StatementReturnNone(StatementReturnConstantBase):
    """``return None`` — the most common constant return."""
    kind = "STATEMENT_RETURN_NONE"

    __slots__ = ()

    def __init__(self, source_ref):
        StatementReturnConstantBase.__init__(self, source_ref=source_ref)

    def finalize(self):
        del self.parent

    def getConstant(self):
        return None


class StatementReturnFalse(StatementReturnConstantBase):
    """``return False`` as a dedicated node, avoiding a stored constant."""
    kind = "STATEMENT_RETURN_FALSE"

    __slots__ = ()

    def __init__(self, source_ref):
        StatementReturnConstantBase.__init__(self, source_ref=source_ref)

    def finalize(self):
        del self.parent

    def getConstant(self):
        return False


class StatementReturnTrue(StatementReturnConstantBase):
    """``return True`` as a dedicated node, avoiding a stored constant."""
    kind = "STATEMENT_RETURN_TRUE"

    __slots__ = ()

    def __init__(self, source_ref):
        StatementReturnConstantBase.__init__(self, source_ref=source_ref)

    def finalize(self):
        del self.parent

    def getConstant(self):
        return True
class StatementReturnConstant(StatementReturnConstantBase):
    """Return of an arbitrary constant (anything but the None/True/False
    singletons, which have their own node classes)."""
    kind = "STATEMENT_RETURN_CONSTANT"

    __slots__ = ("constant",)

    def __init__(self, constant, source_ref):
        StatementReturnConstantBase.__init__(self, source_ref=source_ref)

        self.constant = constant

    def finalize(self):
        del self.parent
        del self.constant

    def getConstant(self):
        return self.constant

    def getDetails(self):
        return {"constant": self.constant}
class StatementReturnReturnedValue(StatementBase):
    """Re-return of the already set return value (internal helper node)."""
    kind = "STATEMENT_RETURN_RETURNED_VALUE"

    __slots__ = ()

    def __init__(self, source_ref):
        StatementBase.__init__(self, source_ref=source_ref)

    def finalize(self):
        del self.parent

    @staticmethod
    def isStatementReturnReturnedValue():
        return True

    @staticmethod
    def isStatementReturn():
        return True

    @staticmethod
    def isStatementAborting():
        return True

    @staticmethod
    def mayReturn():
        return True

    @staticmethod
    def mayRaiseException(exception_type):
        return False

    def computeStatement(self, trace_collection):
        trace_collection.onFunctionReturn()

        return self, None, None

    @staticmethod
    def getStatementNiceName():
        return "rereturn statement"
def makeStatementReturnConstant(constant, source_ref):
    """Pick the most specific constant-return node class for `constant`.

    The None/True/False singletons get their dedicated node classes; any
    other value is carried by the generic StatementReturnConstant.
    """
    if constant is None:
        return StatementReturnNone(source_ref=source_ref)
    if constant is True:
        return StatementReturnTrue(source_ref=source_ref)
    if constant is False:
        return StatementReturnFalse(source_ref=source_ref)
    return StatementReturnConstant(constant=constant, source_ref=source_ref)
def makeStatementReturn(expression, source_ref):
    """Create the best return statement variant.

    None means an implicit/bare return; compile-time constants go through
    makeStatementReturnConstant(); everything else keeps the expression.
    """
    if expression is None:
        return StatementReturnNone(source_ref=source_ref)
    elif expression.isCompileTimeConstant():
        return makeStatementReturnConstant(
            constant=expression.getCompileTimeConstant(), source_ref=source_ref
        )
    else:
        return StatementReturn(expression=expression, source_ref=source_ref)
|
apache-2.0
| 6,049,847,874,321,408,000
| 25.162791
| 119
| 0.661185
| false
| 4.530201
| false
| false
| false
|
pdinges/python-schoof
|
support/profiling.py
|
1
|
11632
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010--2012 Peter Dinges <pdinges@acm.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Class decorators for more expressive call profiles.
The Python profiler in the @c cProfile module registers methods as elements
of the class that contains their source code. Thus, @c cProfile merges calling
statistics of the defining class and subclasses that do not re-implement the
methods (such as template class specializations). The decorator
@c local_method_names() addresses this problem. Also, in case of nested template
classes (say, elliptic_curves.polynomials.naive.CurvePolynomials),
the template parameters bloat the type name; the decorator @c profiling_name()
alleviates the issue.
@package support.profiling
@author Peter Dinges <pdinges@acm.org>
"""
def profiling_name(name):
    """
    A class decorator that sets the @p name that will show up in profiles
    generated with the @c cProfile module.

    @param name A string used as the class name in profiles; for template
                classes it may contain format-string variables for the
                template parameters (e.g. "GF<{_size}>").

    @note  The decorator only stores the name on the class as
           @c __profiling_name__; the @c local_method_names() decorator is
           what actually applies it to the profile output.
    """
    def decorate(cls):
        cls.__profiling_name__ = name
        return cls
    return decorate
from .types import is_incomplete
def local_method_names(cls):
    """
    A class decorator that makes the function names used by the @c cProfile
    module local to the class of use (rather than the class of definition).
    The @c cProfile module uses the name of a function's code object as the
    profile name. Therefore, calls to methods in subclasses that do not
    re-implement the method are counted as calls to the parent class. In
    template classes, this makes the call profile too coarse.
    Use profiling_name() to get shorter class names.
    @note The decorator copies the code objects; it "flattens" the class.
          Therefore, re-assigning methods will annihilate the decorator
          effects for the method.
    @see The sources of rings.quotients.naive.QuotientRing for a usage
         example and support.types.template() for information about template
         classes.
    """
    # Guard: this metaclass has already been patched; do not wrap twice.
    if getattr( cls.__class__, "__operation_renaming__", False ):
        return cls
    original_new = cls.__class__.__new__
    def prefixing_new(meta_class, class_name, bases, class_dict, **kw_arguments):
        # Delegate the actual class creation to the original metaclass __new__.
        class_object = original_new( meta_class, class_name, bases, class_dict, **kw_arguments )
        # Template classes with unbound parameters cannot be renamed yet.
        if is_incomplete( class_object ):
            return class_object
        # Already processed (e.g. re-specialization of the same class).
        if "__operation_renaming__" in class_object.__dict__:
            return class_object
        profiling_name = __profiling_str( class_object )
        # Rename metaclass-level operations (class instantiation etc.).
        __localize_method_names(
                   class_object.__class__,
                   [ "__new__", "__call__" ],
                   "{cls}::<meta>{{meth}}".format( cls = profiling_name )
               )
        # Copy inherited methods into the class dictionary, then give every
        # local method a name qualified by this class's profiling alias.
        __flatten_methods( class_object )
        __localize_method_names(
                    class_object,
                    __method_names( class_object ),
                    "{cls}::{{meth}}".format( cls = profiling_name )
                )
        # Mark the class so nested template instantiations skip re-processing.
        setattr( class_object, "__operation_renaming__", True )
        return class_object
    cls.__class__.__new__ = prefixing_new
    setattr( cls.__class__, "__operation_renaming__", True )
    return cls
def rename_function( function, name, filename=None, firstlineno=-1 ):
    """
    Rename a function and its associated code object.

    This is handy when using the @c profile and @c cProfile modules:
    both retrieve the function names from the code object
    (the @c co_name attribute); @c __name__ is ignored.

    @param function     The function (or bound/unbound method) to rename.
    @param name         The new name; also written to the code object.
    @param filename     Optional replacement for @c co_filename; defaults
                        to the original file name.
    @param firstlineno  Optional replacement for @c co_firstlineno; -1
                        keeps the original line number.

    @raise ValueError if @p function has no associated code object.
    """
    # Renaming a function in the profile requires generating a new
    # code object. As CodeType.__doc__ notes, this is not for the
    # faint of heart.

    # Modify the unbound function object of methods
    if hasattr( function, "__func__" ):
        function = function.__func__

    try:
        code = function.__code__
    except AttributeError:
        # BUG FIX: the original used a named placeholder '{func}' with a
        # positional format() argument, which raised KeyError instead of
        # the intended ValueError.
        message = "expected '{0}' to have an associated code object ('__code__' attribute)"
        raise ValueError( message.format( function ) )

    # Copy old values if unspecified
    if filename is None:
        filename = code.co_filename
    if firstlineno == -1:
        firstlineno = code.co_firstlineno

    if hasattr( code, "replace" ):
        # Python 3.8+: CodeType.replace() copies all other fields for us and
        # stays correct across interpreter versions (e.g. co_posonlyargcount,
        # which the manual constructor call below does not supply).
        renamed_code = code.replace(
            co_filename = str( filename ),
            co_name = str( name ),
            co_firstlineno = int( firstlineno ),
        )
    else:
        # Fallback for Python < 3.8: construct the code object field by field.
        renamed_code = types.CodeType(
                            code.co_argcount,
                            code.co_kwonlyargcount,
                            code.co_nlocals,
                            code.co_stacksize,
                            code.co_flags,
                            code.co_code,
                            code.co_consts,
                            code.co_names,
                            code.co_varnames,
                            str( filename ),
                            str( name ),
                            int( firstlineno ),
                            code.co_lnotab,
                            code.co_freevars,
                            code.co_cellvars
                        )
    function.__name__ = str( name )
    function.__code__ = renamed_code
def __method_names( class_object ):
    """
    Return the names of the methods defined directly in @p class_object's
    own dictionary (inherited methods are not included).

    This function is not intended for direct use.
    """
    names = []
    for attribute_name, attribute_value in class_object.__dict__.items():
        # Only plain functions, methods and static-/class-method wrappers
        # (see the module-level 'function_types' list) count as methods.
        if type( attribute_value ) in function_types:
            names.append( attribute_name )
    return names
def __localize_method_names( class_object, method_names, format_string ):
    """
    Make all inherited (and not re-implemented) methods local to the class
    and rename them accordingly. That way, the @c cProfile module
    distinguishes between calls the original and the inherited implementation.
    This function is not intended for direct use.

    @param class_object  The class whose methods are localized.
    @param method_names  Names of the methods to copy and rename.
    @param format_string A format string with a '{meth}' placeholder for
                         the method name.
    """
    for method_name in method_names:
        # Look the method up along the MRO; it may live in a parent class.
        method = __get_dict_item( class_object, method_name )
        # Copy the function (and its code object) so renaming does not
        # affect other classes that share the original implementation.
        method_copy = __copy_function( method )
        new_name = format_string.format( meth = method_name )
        rename_function( method_copy, new_name )
        setattr( class_object, method_name, method_copy )
def __flatten_methods( class_object ):
    """
    Copy all inherited (and not re-implemented) methods to the local
    class dictionary.
    This function is not intended for direct use.
    """
    for attribute_name in dir( class_object ):
        # Skip local attributes
        if attribute_name in class_object.__dict__:
            continue
        # Skip non-method attributes (for example class variables)
        method = __get_dict_item( class_object, attribute_name )
        if type( method ) not in function_types:
            continue
        # Unwrap any operand-casting wrapper (see support.operators) before
        # copying, then install the copy directly on the class.
        method_copy = __copy_function( __unwrap( method ) )
        setattr(class_object, attribute_name, method_copy )
def __get_dict_item( class_object, key ):
    """
    Return the class dictionary entry with key @p key; traverse the parent
    classes until a matching entry was found. Otherwise raise an
    @c AttributeError.

    This function is not intended for direct use.
    """
    for ancestor in class_object.__mro__:
        try:
            return ancestor.__dict__[ key ]
        except KeyError:
            continue
    raise AttributeError(
            "object '{name}' has no attribute '{key}'".format(
                name = class_object.__name__, key = key
            )
        )
def __copy_function( function ):
    """
    Create a completely independent copy of @p function.
    The function also copies the code object of @p function.
    This function is not intended for direct use.

    @raise ValueError if @p function is neither a function nor a method.
    """
    # static-/class-methods wrap a plain function: copy the wrapped
    # function and re-wrap it with the same wrapper type.
    if type( function ) in [ staticmethod, classmethod ]:
        return type( function )( __copy_function( function.__func__ ) )
    if type( function ) not in [ types.FunctionType, types.MethodType ]:
        message = "expected function or method type (got {0})"
        raise ValueError( message.format( function ) )
    # Bound methods: operate on the underlying function; re-bind below.
    if type( function ) is types.MethodType:
        function = function.__func__
    code = function.__code__
    # Field-by-field copy of the code object so the copy can later be
    # renamed without affecting the original (see rename_function()).
    code_copy = types.CodeType(
                        code.co_argcount,
                        code.co_kwonlyargcount,
                        code.co_nlocals,
                        code.co_stacksize,
                        code.co_flags,
                        code.co_code,
                        code.co_consts,
                        code.co_names,
                        code.co_varnames,
                        code.co_filename,
                        code.co_name,
                        code.co_firstlineno,
                        code.co_lnotab,
                        code.co_freevars,
                        code.co_cellvars
                    )
    # Rebuild the function around the copied code object, sharing globals,
    # defaults and closure cells with the original.
    function_copy = types.FunctionType(
                            code_copy,
                            function.__globals__,
                            function.__name__,
                            function.__defaults__,
                            function.__closure__
                        )
    # Re-bind methods to their instance
    if type( function ) is types.MethodType:
        return types.MethodType( function_copy, function.__self__)
    return function_copy
def __unwrap( method ):
    """
    Return the original function inside a (possibly nested) method wrapper.

    This function is not intended for direct use.

    @see support.operators.operand_casting()
    """
    # Iteratively peel off wrapper layers instead of recursing.
    while hasattr( method, "__wrapped_method__" ):
        method = getattr( method, "__wrapped_method__" )
    return method
def __profiling_str(obj):
    """
    Return the formatted name of @p obj as set by the @c profiling_name()
    decorator. Fall back to __str__() if necessary.
    This function is not intended for direct use.
    """
    # FIXME: Endless recursion for cyclic dependencies.
    if isinstance(obj, type) and hasattr(obj, "__profiling_name__"):
        # Template classes carry their bound parameters in the metaclass's
        # __parameter_map__; format each parameter value recursively.
        if hasattr( obj.__class__, "__parameter_map__" ):
            args = [ (k, __profiling_str(v)) for k,v in obj.__class__.__parameter_map__.items() ]
        else:
            args = []
        try:
            return obj.__profiling_name__.format( **dict( args ) )
        except KeyError:
            # Name references parameters that are not (yet) bound; fall back.
            pass
    return str(obj)
import types

# Types treated as "methods" when scanning class dictionaries: plain
# functions, bound methods, and static-/class-method wrapper objects.
function_types = [
    types.FunctionType,
    types.MethodType,
    staticmethod,
    classmethod,
]
|
gpl-3.0
| -6,382,795,003,153,329,000
| 33.11437
| 97
| 0.588635
| false
| 4.499807
| false
| false
| false
|
OxES/OxKeplerSC
|
src/cbvc/VBLinRegARD.py
|
1
|
3845
|
import numpy, sys
import scipy.linalg, scipy.special
'''
VBLinRegARD: Linear basis regression with automatic relevance priors
using Variational Bayes.
For more details on the algorithm see Appendix of
Roberts, McQuillan, Reece & Aigrain, 2013, MNRAS, 354, 3639.
History:
2011: Translated by Thomas Evans from original Matlab code by Stephen J Roberts
2013: Documentation added by Suzanne Aigrain
'''
def logdet(a):
    '''
    Compute log of determinant of matrix a using Cholesky decomposition.

    The matrix is expected to be symmetric positive-definite; warnings are
    printed (but no exception raised here) if it is not.
    '''
    # First make sure that matrix is symmetric:
    if not numpy.allclose(a.T, a):
        print ('MATRIX NOT SYMMETRIC')
    # Second make sure that matrix is positive definite:
    eigenvalues = scipy.linalg.eigvalsh(a)
    smallest = min(eigenvalues)
    if smallest <= 0:
        print ('Matrix is NOT positive-definite')
        print (' min eigv = %.16f' % smallest)
    # log(det(A)) = 2 * sum(log(diag(chol(A)))) for SPD matrices.
    cholesky_factor = scipy.linalg.cholesky(a)
    diagonal = numpy.diag(cholesky_factor.T)
    return 2. * numpy.sum(numpy.log(diagonal), axis=0)
def bayes_linear_fit_ard(X, y):
    '''
    Fit linear basis model with design matrix X to data y, with automatic
    relevance determination (ARD) priors on the weights, via variational
    Bayes.

    Calling sequence:
        w, V, invV, logdetV, an, bn, E_a, L = bayes_linear_fit_ard(X, y)

    Inputs:
        X: design matrix (numpy.matrix, N rows by D basis columns)
        y: target data (numpy.matrix, N by 1)

    Outputs:
        w: posterior mean basis function weights (D by 1)
        V, invV, logdetV: posterior weight covariance, its inverse, and
            the log-determinant of the covariance
        an, bn: parameters of the Gamma posterior over the noise precision
        E_a: expected ARD precision for each basis function (D by 1)
        L: final value of the variational lower bound

    Raises an Exception (and writes ERROR_LOG) if the variational bound
    decreases, which indicates a numerical problem.
    '''
    import warnings  # was referenced but never imported in the original

    # uninformative priors
    a0 = 1e-2
    b0 = 1e-4
    c0 = 1e-2
    d0 = 1e-4
    # pre-process data
    [N, D] = X.shape
    X_corr = X.T * X
    Xy_corr = X.T * y
    an = a0 + N / 2.
    gammaln_an = scipy.special.gammaln(an)
    cn = c0 + 1 / 2.
    D_gammaln_cn = D * scipy.special.gammaln(cn)
    # iterate to find hyperparameters
    L_last = -sys.float_info.max
    max_iter = 500
    E_a = numpy.matrix(numpy.ones(D) * c0 / d0).T
    for iteration in range(max_iter):
        # covariance and weight of linear model
        invV = numpy.matrix(numpy.diag(numpy.array(E_a)[:, 0])) + X_corr
        V = numpy.matrix(scipy.linalg.inv(invV))
        logdetV = -logdet(invV)
        w = numpy.dot(V, Xy_corr)[:, 0]
        # parameters of noise model (an remains constant)
        sse = numpy.sum(numpy.power(X * w - y, 2), axis=0)
        if numpy.imag(sse) == 0:
            sse = numpy.real(sse)[0]
        else:
            print ('Something went wrong')
        bn = b0 + 0.5 * (sse + numpy.sum((numpy.array(w)[:, 0] ** 2) * numpy.array(E_a)[:, 0], axis=0))
        E_t = an / bn
        # hyperparameters of covariance prior (cn remains constant)
        dn = d0 + 0.5 * (E_t * (numpy.array(w)[:, 0] ** 2) + numpy.diag(V))
        E_a = numpy.matrix(cn / dn).T
        # variational bound, ignoring constant terms for now
        # NOTE: numpy.log/numpy.multiply replace scipy.log/scipy.multiply,
        # which were deprecated and removed from modern SciPy.
        L = -0.5 * (E_t * sse + numpy.sum(numpy.multiply(X, X * V))) + \
            0.5 * logdetV - b0 * E_t + gammaln_an - an * numpy.log(bn) + an + \
            D_gammaln_cn - cn * numpy.sum(numpy.log(dn))
        # variational bound must grow!
        if L_last > L:
            # if this happens, then something has gone wrong....
            with open('ERROR_LOG', 'w') as error_log:
                error_log.write('Last bound %6.6f, current bound %6.6f' % (L, L_last))
            raise Exception('Variational bound should not reduce - see ERROR_LOG')
        # stop if change in variation bound is < 0.001%
        if abs(L_last - L) < abs(0.00001 * L):
            break
        L_last = L
    else:
        # for/else: reached only when the loop ran out of iterations without
        # converging. (The original test `iter == max_iter` could never be
        # true, since range(max_iter) stops at max_iter - 1.)
        warnings.warn('Bayes:maxIter ... Bayesian linear regression reached maximum number of iterations.')
    # augment variational bound with constant terms
    L = L - 0.5 * (N * numpy.log(2 * numpy.pi) - D) - scipy.special.gammaln(a0) + \
        a0 * numpy.log(b0) + D * (-scipy.special.gammaln(c0) + c0 * numpy.log(d0))
    return w, V, invV, logdetV, an, bn, E_a, L
|
gpl-3.0
| 4,759,305,985,593,180,000
| 35.971154
| 108
| 0.584135
| false
| 3.143908
| false
| false
| false
|
google/material-design-icons
|
update/venv/lib/python3.9/site-packages/fontTools/misc/xmlReader.py
|
5
|
4764
|
from fontTools import ttLib
from fontTools.misc.textTools import safeEval
from fontTools.ttLib.tables.DefaultTable import DefaultTable
import sys
import os
import logging
log = logging.getLogger(__name__)
# Raised for structural problems in the TTX input (e.g. a wrong root tag).
class TTXParseError(Exception): pass

# Size (in bytes) of the chunks fed to the expat parser; chunking keeps
# memory bounded and lets progress be reported while reading.
BUFSIZE = 0x4000
class XMLReader(object):
	"""Read a TTX (XML) font dump and populate *ttFont* with its tables.

	The input is streamed through an expat parser in BUFSIZE chunks, so
	large TTX files need not be held in memory all at once. Individual
	tables may be stored in external files referenced via a 'src'
	attribute; those are parsed by nested XMLReader instances.
	"""

	def __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):
		# '-' is the conventional name for standard input
		if fileOrPath == '-':
			fileOrPath = sys.stdin
		if not hasattr(fileOrPath, "read"):
			self.file = open(fileOrPath, "rb")
			self._closeStream = True
		else:
			# assume readable file object
			self.file = fileOrPath
			self._closeStream = False
		self.ttFont = ttFont
		self.progress = progress
		if quiet is not None:
			# 'quiet' is deprecated; logging configuration replaces it.
			from fontTools.misc.loggingTools import deprecateArgument
			deprecateArgument("quiet", "configure logging instead")
			self.quiet = quiet
		self.root = None
		# Stack of content lists, one per open element; nested elements
		# append themselves to their parent's list.
		self.contentStack = []
		# When True, the table to fill is already known (self.currentTable)
		# and only element *content* is parsed, not the table tag itself.
		self.contentOnly = contentOnly
		self.stackSize = 0

	def read(self, rootless=False):
		"""Parse the whole input; rootless=True skips the <ttFont> root level."""
		if rootless:
			self.stackSize += 1
		if self.progress:
			# Determine the total file size so progress can be reported.
			self.file.seek(0, 2)
			fileSize = self.file.tell()
			self.progress.set(0, fileSize // 100 or 1)
		self.file.seek(0)
		self._parseFile(self.file)
		if self._closeStream:
			self.close()
		if rootless:
			self.stackSize -= 1

	def close(self):
		"""Close the underlying stream (also done automatically by read())."""
		self.file.close()

	def _parseFile(self, file):
		"""Feed *file* through an expat parser in BUFSIZE chunks."""
		from xml.parsers.expat import ParserCreate
		parser = ParserCreate()
		parser.StartElementHandler = self._startElementHandler
		parser.EndElementHandler = self._endElementHandler
		parser.CharacterDataHandler = self._characterDataHandler

		pos = 0
		while True:
			chunk = file.read(BUFSIZE)
			if not chunk:
				# An empty chunk with isfinal=1 signals end of input.
				parser.Parse(chunk, 1)
				break
			pos = pos + len(chunk)
			if self.progress:
				self.progress.set(pos // 100)
			parser.Parse(chunk, 0)

	def _startElementHandler(self, name, attrs):
		"""expat callback for open tags; dispatches tables by nesting depth."""
		if self.stackSize == 1 and self.contentOnly:
			# We already know the table we're parsing, skip
			# parsing the table tag and continue to
			# stack '2' which begins parsing content
			self.contentStack.append([])
			self.stackSize = 2
			return
		stackSize = self.stackSize
		self.stackSize = stackSize + 1
		subFile = attrs.get("src")
		if subFile is not None:
			# Resolve the external file relative to this file's directory.
			if hasattr(self.file, 'name'):
				# if file has a name, get its parent directory
				dirname = os.path.dirname(self.file.name)
			else:
				# else fall back to using the current working directory
				dirname = os.getcwd()
			subFile = os.path.join(dirname, subFile)
		if not stackSize:
			# Depth 0: only the <ttFont> root element is legal here.
			if name != "ttFont":
				raise TTXParseError("illegal root tag: %s" % name)
			sfntVersion = attrs.get("sfntVersion")
			if sfntVersion is not None:
				if len(sfntVersion) != 4:
					# Unescape e.g. "\\x00\\x01\\x00\\x00"-style version strings.
					sfntVersion = safeEval('"' + sfntVersion + '"')
				self.ttFont.sfntVersion = sfntVersion
			self.contentStack.append([])
		elif stackSize == 1:
			# Depth 1: a table element (or an external reference to one).
			if subFile is not None:
				subReader = XMLReader(subFile, self.ttFont, self.progress)
				subReader.read()
				self.contentStack.append([])
				return
			tag = ttLib.xmlToTag(name)
			msg = "Parsing '%s' table..." % tag
			if self.progress:
				self.progress.setLabel(msg)
			log.info(msg)
			if tag == "GlyphOrder":
				tableClass = ttLib.GlyphOrder
			elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
				tableClass = DefaultTable
			else:
				tableClass = ttLib.getTableClass(tag)
				if tableClass is None:
					tableClass = DefaultTable
			if tag == 'loca' and tag in self.ttFont:
				# Special-case the 'loca' table as we need the
				# original if the 'glyf' table isn't recompiled.
				self.currentTable = self.ttFont[tag]
			else:
				self.currentTable = tableClass(tag)
				self.ttFont[tag] = self.currentTable
			self.contentStack.append([])
		elif stackSize == 2 and subFile is not None:
			# Depth 2 external reference: parse only the content of the
			# referenced file into the already-known current table.
			subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
			subReader.read()
			self.contentStack.append([])
			self.root = subReader.root
		elif stackSize == 2:
			# Depth 2: top-level element of a table's content; remembered in
			# self.root and handed to fromXML() when the element closes.
			self.contentStack.append([])
			self.root = (name, attrs, self.contentStack[-1])
		else:
			# Deeper nesting: record the element in the parent's content
			# list and push its own (initially empty) content list.
			l = []
			self.contentStack[-1].append((name, attrs, l))
			self.contentStack.append(l)

	def _characterDataHandler(self, data):
		"""expat callback for text between tags."""
		if self.stackSize > 1:
			self.contentStack[-1].append(data)

	def _endElementHandler(self, name):
		"""expat callback for close tags; hands completed elements to fromXML."""
		self.stackSize = self.stackSize - 1
		del self.contentStack[-1]
		if not self.contentOnly:
			if self.stackSize == 1:
				self.root = None
			elif self.stackSize == 2:
				name, attrs, content = self.root
				self.currentTable.fromXML(name, attrs, content, self.ttFont)
				self.root = None
class ProgressPrinter(object):
	"""Minimal progress reporter that simply echoes titles and labels to stdout.

	Implements the same interface XMLReader expects from a progress object;
	numeric progress updates are accepted but ignored.
	"""

	def __init__(self, title, maxval=100):
		# maxval is accepted for interface compatibility only.
		print(title)

	def set(self, val, maxval=None):
		# No-op: this printer does not track numeric progress.
		pass

	def increment(self, val=1):
		# No-op: this printer does not track numeric progress.
		pass

	def setLabel(self, text):
		print(text)
|
apache-2.0
| 1,176,087,762,665,243,600
| 27.023529
| 86
| 0.690386
| false
| 3.13834
| false
| false
| false
|
mainakibui/dkobo
|
dkobo/koboform/views/survey_draft_views.py
|
1
|
11283
|
import json
import requests
import pyxform.survey_from
from guardian.shortcuts import assign_perm
from django.http import HttpResponseBadRequest, HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.utils.encoding import smart_unicode
from django.contrib.auth.decorators import login_required
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.authtoken.models import Token
from dkobo.koboform.models import SurveyDraft
from dkobo.koboform.serializers import ListSurveyDraftSerializer, DetailSurveyDraftSerializer
from dkobo.koboform.kobo_to_xlsform import convert_any_kobo_features_to_xlsform_survey_structure
from dkobo.koboform import pyxform_utils, kobocat_integration, xlform
def export_form(request, id):
    """
    Download the survey draft with primary key *id* in a chosen format.

    The GET parameter 'format' selects 'xml' (XForm), 'xls', or 'csv';
    it defaults to 'xml'. The response is served as an attachment named
    after the draft's id_string. Unknown formats yield a 400 response.
    """
    survey_draft = SurveyDraft.objects.get(pk=id)
    file_format = request.GET.get('format', 'xml')
    if file_format == "xml":
        contents = survey_draft.to_xml()
        mimetype = 'application/force-download'
        # content_length = len(contents) + 2 # the length of the string != the length of the file
    elif file_format == "xls":
        contents = survey_draft.to_xls()
        mimetype = 'application/vnd.ms-excel; charset=utf-8'
        # contents.read()
        # content_length = contents.tell()
        # contents.seek(0)
    elif file_format == "csv":
        # CSV is the draft's native storage format; serve the body verbatim.
        contents = survey_draft.body
        mimetype = 'text/csv; charset=utf-8'
        # content_length = len(contents)
    else:
        return HttpResponseBadRequest(
            "Format not supported: '%s'. Supported formats are [xml,xls,csv]." % file_format)
    # NOTE(review): 'mimetype' is the pre-Django-1.7 keyword argument; newer
    # Django versions use 'content_type' — confirm against the project's
    # Django version before upgrading.
    response = HttpResponse(contents, mimetype=mimetype)
    response['Content-Disposition'] = 'attachment; filename=%s.%s' % (survey_draft.id_string,
                                                                      file_format)
    # response['Content-Length'] = content_length
    return response
# def export_all_questions(request):
# queryset = SurveyDraft.objects.filter(user=request.user)
# queryset = queryset.exclude(asset_type=None)
# from dkobo.koboform import pyxform_utils
# response = HttpResponse(pyxform_utils.convert_csv_to_xls(concentrated_csv), mimetype='application/vnd.ms-excel; charset=utf-8')
# response['Content-Disposition'] = 'attachment; filename=all_questions.xls'
# return response
@login_required
def create_survey_draft(request):
    """
    Create a new SurveyDraft owned by the requesting user from a JSON
    request body and return the created draft's fields as JSON.

    The payload may carry the draft name under either 'title' or the
    legacy 'name' key; 'title' wins when both are present.
    """
    # BUG FIX: model_to_dict was referenced below but never imported at
    # module level, so this view raised NameError at runtime.
    from django.forms.models import model_to_dict
    raw_draft = json.loads(request.body)
    # Accept either 'title' or legacy 'name' for the draft name.
    name = raw_draft.get('title', raw_draft.get('name'))
    csv_details = {u'user': request.user,
                   u'body': raw_draft.get("body"),
                   u'description': raw_draft.get("description"),
                   u'name': name}
    survey_draft = SurveyDraft.objects.create(**csv_details)
    return HttpResponse(json.dumps(model_to_dict(survey_draft)))
@login_required
@api_view(['GET', 'PUT', 'DELETE', 'PATCH'])
def survey_draft_detail(request, pk, format=None):
    """
    Retrieve (GET), replace (PUT), partially update (PATCH) or delete
    (DELETE) a single SurveyDraft.

    Superusers may operate on any draft; other users only on drafts they
    own. Returns 404 when no matching draft exists.
    """
    kwargs = {'pk': pk}
    if not request.user.is_superuser:
        # Non-superusers are restricted to their own drafts.
        kwargs['user'] = request.user
    try:
        survey_draft = SurveyDraft.objects.get(**kwargs)
    except SurveyDraft.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    if request.method == 'GET':
        serializer = DetailSurveyDraftSerializer(survey_draft)
        return Response(serializer.data)
    elif request.method == 'PUT':
        # Full update: validate and save the complete representation.
        serializer = DetailSurveyDraftSerializer(survey_draft, data=request.DATA)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    elif request.method == 'PATCH':
        # Partial update: apply each supplied field directly; the 'tags'
        # field is replaced wholesale rather than merged.
        for key, value in request.DATA.items():
            if key == 'tags':
                survey_draft.tags.clear()
                for val in value: survey_draft.tags.add(val)
            else:
                survey_draft.__setattr__(key, value)
        survey_draft.save()
        return Response(DetailSurveyDraftSerializer(survey_draft).data)
    elif request.method == 'DELETE':
        survey_draft.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
# Content types accepted as XLS/XLSX uploads. 'application/octet-stream'
# is included because some browsers send it for Excel files instead of
# the proper spreadsheet MIME type.
XLS_CONTENT_TYPES = [
    "application/vnd.ms-excel",
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    "application/octet-stream",
]
@login_required
def bulk_delete_questions(request):
    """
    Delete every survey draft owned by the requesting user whose id
    appears in the JSON list posted as the request body.
    """
    ids_to_delete = json.loads(request.body)
    drafts = SurveyDraft.objects.filter(user=request.user, id__in=ids_to_delete)
    drafts.delete()
    return HttpResponse('')
@login_required
def import_survey_draft(request):
    """
    Imports an XLS or CSV file into the user's SurveyDraft list.
    Returns an error in JSON if the survey was not valid.

    XForm XML uploads (*.xml) are first converted back to a CSV survey;
    spreadsheet uploads are validated as XLSForms before conversion.
    Responds 200 on success, 204 when no file was posted, 500 on any
    conversion or validation failure (details in the JSON body).
    """
    output = {}
    posted_file = request.FILES.get(u'files')
    response_code = 200
    if not posted_file:
        response_code = 204 # Error 204: No input
        output[u'error'] = "No file posted"
    elif posted_file.name.endswith('.xml'):
        # XForm XML: round-trip through pyxform back to KoBo-flavored CSV.
        warnings = []
        try:
            survey_object = pyxform.survey_from.xform(filelike_obj=posted_file, warnings=warnings)
            _csv = survey_object.to_csv(warnings=warnings, koboform=True).read()
            new_survey_draft = SurveyDraft.objects.create(**{
                u'body': smart_unicode(_csv),
                u'name': posted_file.name,
                u'user': request.user
            })
            output[u'survey_draft_id'] = new_survey_draft.id
        except Exception, err:
            response_code = 500
            output[u'error'] = err.message or str(err)
        output[u'warnings'] = warnings
    else:
        try:
            # create and validate the xform but ignore the results
            warnings = []
            pyxform_utils.validate_kobo_xlsform(posted_file, warnings=warnings)
            output[u'xlsform_valid'] = True
            # Rewind: validation consumed the uploaded stream.
            posted_file.seek(0)
            if posted_file.content_type in XLS_CONTENT_TYPES:
                _csv = pyxform_utils.convert_xls_to_csv_string(posted_file)
            elif posted_file.content_type == "text/csv":
                _csv = posted_file.read()
            else:
                raise Exception("Content-type not recognized: '%s'" % posted_file.content_type)
            new_survey_draft = SurveyDraft.objects.create(**{
                u'body': smart_unicode(_csv),
                u'name': posted_file.name,
                u'user': request.user
            })
            output[u'survey_draft_id'] = new_survey_draft.id
        except Exception, err:
            response_code = 500
            output[u'error'] = err.message or str(err)
    return HttpResponse(json.dumps(output), content_type="application/json", status=response_code)
@login_required
def import_questions(request):
    """
    Imports an XLS or CSV file into the user's SurveyDraft list.
    Returns an error in JSON if the survey was not valid.

    Unlike import_survey_draft(), the uploaded spreadsheet is split into
    one SurveyDraft of asset_type 'question' per survey row.
    """
    output = {}
    posted_file = request.FILES.get(u'files')
    response_code = 200
    if posted_file:
        # Rewind in case the upload stream was already read.
        posted_file.seek(0)
        if posted_file.content_type in XLS_CONTENT_TYPES:
            imported_sheets_as_csv = pyxform_utils.convert_xls_to_csv_string(posted_file)
        elif posted_file.content_type == "text/csv":
            imported_sheets_as_csv = posted_file.read()
        else:
            raise Exception("Content-type not recognized: '%s'" % posted_file.content_type)
        split_surveys = xlform.split_apart_survey(imported_sheets_as_csv)
        new_survey_drafts = []
        for _split_survey in split_surveys:
            sd = SurveyDraft(name='New Form',
                             body=_split_survey[0],
                             user=request.user,
                             asset_type='question')
            # Populate the draft's summary fields before bulk insertion
            # (bulk_create does not call save()).
            sd._summarize()
            new_survey_drafts.append(sd)
        SurveyDraft.objects.bulk_create(new_survey_drafts)
        # -1 signals "multiple drafts created" to the client.
        output[u'survey_draft_id'] = -1
    else:
        response_code = 204 # Error 204: No input
        output[u'error'] = "No file posted"
    return HttpResponse(json.dumps(output), content_type="application/json", status=response_code)
@login_required
@api_view(['GET', 'POST'])
def publish_survey_draft(request, pk, format=None):
    """
    Publish the user's survey draft to the configured KoBoCat server.

    Converts the draft's CSV body to a valid XLSForm (expanding
    KoBo-specific constructs such as score/rank), sets the form_id from
    the request payload, and posts it to KoBoCat's /api/v1/forms endpoint
    using the user's token. On success the draft records the KoBoCat form
    id. Returns 503 if no KoBoCat server is configured, 404 for missing
    drafts, and 504 when KoBoCat cannot be reached.
    """
    if not kobocat_integration._is_enabled():
        return Response({'error': 'KoBoCat Server not specified'}, status=status.HTTP_503_SERVICE_UNAVAILABLE)
    try:
        survey_draft = SurveyDraft.objects.get(pk=pk, user=request.user)
    except SurveyDraft.DoesNotExist:
        return Response({'error': 'SurveyDraft not found'}, status=status.HTTP_404_NOT_FOUND)
    # convert csv to ss_struct
    ss_struct = pyxform_utils.convert_csv_to_ss_structure(survey_draft.body)
    form_id_string = request.DATA.get('id_string', False)
    # set the form_id based on the payload
    if 'settings' not in ss_struct:
        ss_struct['settings'] = []
    if len(ss_struct['settings']) == 0:
        ss_struct['settings'].append({})
    ss_struct['settings'][0]['form_id'] = form_id_string
    # convert kobo-specific data structures into valid xlsform (e.g. score, rank)
    xlsform_ss_struct = convert_any_kobo_features_to_xlsform_survey_structure(ss_struct)
    valid_xlsform_csv_repr = pyxform_utils.convert_ss_structure_to_csv(xlsform_ss_struct)
    # Ensure the user may create forms on the KoBoCat side.
    _set_necessary_permissions(request.user)
    (token, is_new) = Token.objects.get_or_create(user=request.user)
    headers = {u'Authorization':'Token ' + token.key}
    payload = {u'text_xls_form': valid_xlsform_csv_repr}
    try:
        url = kobocat_integration._kobocat_url('/api/v1/forms', internal=True)
        response = requests.post(url, headers=headers, data=payload)
        status_code = response.status_code
        resp = response.json()
    except Exception, e:
        # Network/parse failure talking to KoBoCat: report as gateway timeout.
        resp = {'status_code': 504, 'detail': str(e)}
        status_code = 504
    if 'formid' in resp:
        # Remember which KoBoCat form this draft was published as.
        survey_draft.kobocat_published_form_id = resp[u'formid']
        survey_draft.save()
        serializer = DetailSurveyDraftSerializer(survey_draft)
        resp.update({
            u'message': 'Successfully published form',
            u'published_form_url': kobocat_integration._kobocat_url('/%s/forms/%s' % (request.user.username, resp.get('id_string')))
            })
    return Response(resp, status=status_code)
def _set_necessary_permissions(user):
    """
    Grant *user* every 'logger' app permission KoBoCat needs for form
    management.

    This defeats the point of permissions, yes — but it gets things
    working until the way KoBoCat uses permissions is better understood.
    """
    logger_permissions = (
        'add_datadictionary', 'add_xform',
        'change_datadictionary', 'change_xform',
        'delete_datadictionary', 'delete_xform',
        'report_xform', 'view_xform',
    )
    for permission_codename in logger_permissions:
        assign_perm('logger.%s' % permission_codename, user)
def published_survey_draft_url(request, pk):
    """
    Redirect to the KoBoCat page of the user who owns the given survey
    draft. Returns 404 if the requesting user owns no draft with that pk.
    """
    try:
        survey_draft = SurveyDraft.objects.get(pk=pk, user=request.user)
    except SurveyDraft.DoesNotExist:
        return Response(status=status.HTTP_404_NOT_FOUND)
    # BUG FIX: Django's auth User model exposes 'username'; the original
    # read 'user.name', which raises AttributeError for standard user
    # models (the rest of this module consistently uses 'username').
    username = survey_draft.user.username
    return HttpResponseRedirect(kobocat_integration._kobocat_url("/%s" % username))
|
agpl-3.0
| -7,562,220,105,088,083,000
| 38.869258
| 133
| 0.639901
| false
| 3.79388
| false
| false
| false
|
CollectQT/qapc
|
test/test_utils.py
|
1
|
5877
|
# builtin
import os
import sys
############################################################
# utils / setup
############################################################
base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
sys.path.append(base_dir)
from lib import utils, file_load
############################################################
# tests
############################################################
# Sanity check that the test runner itself works.
def test_true(): assert True

def test_video_add_worker_and_roles():
    """video_add_worker_and_roles attaches a 'Workers' dict to a video."""
    shoot_roles = file_load.load_shoot_roles()
    # Use the first video in the loaded table as a representative fixture.
    video = list(file_load.get_table().items())[0][1]
    assert video.get('Workers') is None
    video = utils.video_add_worker_and_roles(video, shoot_roles)
    assert isinstance(video['Workers'], dict)

def test_video_add_role_unscaled_percents():
    """Unscaled role percents are added and lie in [0, 100]."""
    shoot_roles = file_load.load_shoot_roles()
    role_percents = file_load.load_role_percents()
    video = list(file_load.get_table().items())[0][1]
    video = utils.video_add_worker_and_roles(video, shoot_roles)
    assert video.get('role percents unscaled') is None
    video = utils.video_add_role_unscaled_percents(video, role_percents)
    assert 0 <= video['role percents unscaled']['QAPC'] <= 100

def test_video_create_scaling_factor():
    """The scaling factor is added and lies in [0, 1]."""
    shoot_roles = file_load.load_shoot_roles()
    role_percents = file_load.load_role_percents()
    video = list(file_load.get_table().items())[0][1]
    video = utils.video_add_worker_and_roles(video, shoot_roles)
    video = utils.video_add_role_unscaled_percents(video, role_percents)
    assert video.get('scaling factor') is None
    video = utils.video_create_scaling_factor(video)
    assert 0 <= video.get('scaling factor') <= 1
def test_video_scale_role_percents():
    """Scaled role percents are added and lie in [0, 100]."""
    shoot_roles = file_load.load_shoot_roles()
    role_percents = file_load.load_role_percents()
    video = list(file_load.get_table().items())[0][1]
    # Run the preceding pipeline stages to build the required fields.
    video = utils.video_add_worker_and_roles(video, shoot_roles)
    video = utils.video_add_role_unscaled_percents(video, role_percents)
    video = utils.video_create_scaling_factor(video)
    assert video.get('role percents') is None
    video = utils.video_scale_role_percents(video)
    assert 0 <= video['role percents']['QAPC'] <= 100

def test_scaling_factor_applies_properly():
    """Scaled percent equals unscaled percent times the scaling factor."""
    shoot_roles = file_load.load_shoot_roles()
    role_percents = file_load.load_role_percents()
    video = list(file_load.get_table().items())[0][1]
    video = utils.video_add_worker_and_roles(video, shoot_roles)
    video = utils.video_add_role_unscaled_percents(video, role_percents)
    video = utils.video_create_scaling_factor(video)
    video = utils.video_scale_role_percents(video)
    expected_scaled_percent = video['role percents unscaled']['QAPC'] * video['scaling factor']
    scaled_percent = video['role percents']['QAPC']
    assert expected_scaled_percent == scaled_percent

def test_video_get_total_earnings():
    """video_get_total_earnings populates the 'total earnings' field."""
    shoot_roles = file_load.load_shoot_roles()
    role_percents = file_load.load_role_percents()
    video = list(file_load.get_table().items())[0][1]
    video = utils.video_add_worker_and_roles(video, shoot_roles)
    video = utils.video_add_role_unscaled_percents(video, role_percents)
    video = utils.video_create_scaling_factor(video)
    video = utils.video_scale_role_percents(video)
    assert video.get('total earnings') is None
    video = utils.video_get_total_earnings(video)
    assert video.get('total earnings') is not None
def test_video_get_worker_earnings():
    """video_get_worker_earnings attaches a per-worker 'earnings' dict."""
    shoot_roles = file_load.load_shoot_roles()
    role_percents = file_load.load_role_percents()
    video = list(file_load.get_table().items())[0][1]
    # Run the full preceding pipeline to build the required fields.
    video = utils.video_add_worker_and_roles(video, shoot_roles)
    video = utils.video_add_role_unscaled_percents(video, role_percents)
    video = utils.video_create_scaling_factor(video)
    video = utils.video_scale_role_percents(video)
    video = utils.video_get_total_earnings(video)
    assert video.get('earnings') is None
    video = utils.video_get_worker_earnings(video)
    assert isinstance(video.get('earnings'), dict)

def test_validate_earnings():
    """Per-worker earnings sum to the video's total (to cent precision)."""
    shoot_roles = file_load.load_shoot_roles()
    role_percents = file_load.load_role_percents()
    video = list(file_load.get_table().items())[0][1]
    video = utils.video_add_worker_and_roles(video, shoot_roles)
    video = utils.video_add_role_unscaled_percents(video, role_percents)
    video = utils.video_create_scaling_factor(video)
    video = utils.video_scale_role_percents(video)
    video = utils.video_get_total_earnings(video)
    video = utils.video_get_worker_earnings(video)
    total_earnings = video['total earnings']
    sum_all_earnings = 0
    for earning in video['earnings'].values():
        sum_all_earnings += earning
    # Round to 2 decimal places to tolerate floating-point drift.
    assert round(total_earnings, 2) == round(sum_all_earnings, 2)

def test_all_videos():
    """The earnings invariant of test_validate_earnings holds for every video."""
    shoot_roles = file_load.load_shoot_roles()
    role_percents = file_load.load_role_percents()
    for video in file_load.get_table().values():
        video = utils.video_add_worker_and_roles(video, shoot_roles)
        video = utils.video_add_role_unscaled_percents(video, role_percents)
        video = utils.video_create_scaling_factor(video)
        video = utils.video_scale_role_percents(video)
        video = utils.video_get_total_earnings(video)
        video = utils.video_get_worker_earnings(video)
        total_earnings = video['total earnings']
        sum_all_earnings = 0
        for earning in video['earnings'].values():
            sum_all_earnings += earning
        assert round(total_earnings, 2) == round(sum_all_earnings, 2)
def test_video_add_image():
    """video_add_images attaches an 'image' entry to a video."""
    video = list(file_load.get_table().items())[0][1]
    images = file_load.get_images()
    assert video.get('image') is None
    video = utils.video_add_images(video, images)
    assert video.get('image') is not None
|
agpl-3.0
| -8,849,307,184,144,119,000
| 34.403614
| 95
| 0.65884
| false
| 3.29798
| true
| false
| false
|
diath/pyfsw
|
pyfsw/models/shop.py
|
1
|
2866
|
from sqlalchemy import Column, Integer, String, Text, ForeignKey
from pyfsw import db
class ShopCategory(db.Model):
	"""A named grouping of purchasable shop items."""
	__tablename__ = 'shop_category'

	# Standard columns
	id = Column(Integer, primary_key=True, unique=True)
	name = Column(String(32))
	# enabled: presumably 1 = visible in the shop, 0 = hidden — confirm
	# against the shop views.
	enabled = Column(Integer, default=1)

	# Relationship
	items = db.relationship('ShopItem', backref='shop_category')

	# Methods
	def __init__(self):
		pass

	def __repr__(self):
		return '<ShopCategory.{}>'.format(self.id)
class ShopItem(db.Model):
	"""A purchasable item offered in the shop (item, container, addon or mount)."""
	__tablename__ = 'shop_item'

	# Constants
	# Discriminator values for the 'type' column.
	Type = {
		'Item': 1,
		'Container': 2,
		'Addon': 3,
		'Mount': 4
	}

	# Standard columns
	id = Column(Integer, primary_key=True, unique=True)
	name = Column(String(32))
	description = Column(Text)
	category_id = Column(Integer, ForeignKey('shop_category.id'))
	type = Column(Integer)
	# key/value: type-dependent payload (e.g. game item id and amount) —
	# confirm exact semantics against the order-delivery code.
	key = Column(Integer)
	value = Column(Integer)
	price = Column(Integer)
	# Optional path/URL overriding the default item image.
	custom_image = Column(String(128), default='')
	enabled = Column(Integer, default=1)

	# Methods
	def __init__(self):
		pass

	def __repr__(self):
		return '<ShopItem.{}>'.format(self.id)
class ShopOrder(db.Model):
	"""A purchased shop item awaiting in-game delivery to a character."""
	__tablename__ = 'shop_order'

	# Standard columns (item fields are copied from ShopItem at purchase time)
	id = Column(Integer, primary_key=True, unique=True)
	name = Column(String(32))
	type = Column(Integer)
	key = Column(Integer)
	value = Column(Integer)
	price = Column(Integer)
	# ordered: presumably a unix timestamp of the purchase — confirm.
	ordered = Column(Integer)
	character_id = Column(Integer)

	# Methods
	def __init__(self):
		pass

	def __repr__(self):
		return '<ShopOrder.{}>'.format(self.id)
class ShopHistory(db.Model):
    """An archived shop order, kept after delivery for auditing."""
    __tablename__ = 'shop_history'

    # Standard columns
    id = Column(Integer, primary_key=True, unique=True)
    name = Column(String(32))
    type = Column(Integer)       # ShopItem.Type code copied at purchase time
    key = Column(Integer)
    value = Column(Integer)
    price = Column(Integer)
    ordered = Column(Integer)    # presumably purchase timestamp — TODO confirm
    delivered = Column(Integer)  # presumably delivery timestamp — TODO confirm
    character_id = Column(Integer)
    account_id = Column(Integer)

    # NOTE: the previous no-op `__init__(self): pass` was removed. It overrode
    # SQLAlchemy's declarative constructor and blocked keyword construction.
    # Zero-argument construction still works.

    def __repr__(self):
        return '<ShopHistory.{}>'.format(self.id)
class PayPalHistory(db.Model):
    """Record of a PayPal payment notification and the points it granted."""
    __tablename__ = 'paypal_history'

    # Standard columns
    id = Column(Integer, primary_key=True, unique=True)
    account_id = Column(Integer)
    timestamp = Column(Integer)
    status = Column(String(32))   # payment status string as reported by PayPal
    test = Column(Integer)        # nonzero when the IPN came from the sandbox — TODO confirm
    origin = Column(String(64))
    amount = Column(String(16))
    points = Column(Integer)

    # NOTE: the previous no-op `__init__(self): pass` was removed. It overrode
    # SQLAlchemy's declarative constructor and blocked keyword construction.
    # Zero-argument construction still works.

    def __repr__(self):
        return '<PayPalHistory.{}>'.format(self.id)
class ZayPayHistory(db.Model):
    """Record of a ZayPay payment and the points it granted."""
    __tablename__ = 'zaypay_history'

    # Standard columns
    id = Column(Integer, primary_key=True, unique=True)
    account_id = Column(Integer)
    timestamp = Column(Integer)
    payment_id = Column(Integer)
    price_setting_id = Column(Integer)
    amount = Column(Integer)
    points = Column(Integer)

    # NOTE: the previous no-op `__init__(self): pass` was removed. It overrode
    # SQLAlchemy's declarative constructor and blocked keyword construction.
    # Zero-argument construction still works.

    def __repr__(self):
        return '<ZayPayHistory.{}>'.format(self.id)
|
mit
| -5,246,374,000,673,775,000
| 20.877863
| 64
| 0.684229
| false
| 2.973029
| false
| false
| false
|
justinvforvendetta/test1234
|
src/blockchain_processor.py
|
1
|
27894
|
import ast
import hashlib
from json import dumps, loads
import os
from Queue import Queue
import random
import sys
import time
import threading
import urllib
import deserialize
from processor import Processor, print_log
from utils import *
from storage import Storage
from utils import logger
class BlockchainProcessor(Processor):
    """Electrum-server style processor that mirrors a reecoind node.

    Maintains a headers file and an address-history database (Storage),
    tracks the daemon's mempool, and answers 'blockchain.*' JSON-RPC
    requests from client sessions.

    NOTE(review): this is Python 2 code — it relies on str.decode('hex') /
    .encode('hex'), urllib.urlopen(url, data), and `except E, e` syntax.
    A background thread (do_catch_up) drives all chain synchronisation;
    shared state is guarded by the various *_lock members.
    """

    def __init__(self, config, shared):
        """Read config, open storage, then start the catch-up thread."""
        Processor.__init__(self)
        self.mtimes = {}  # monitoring: cumulative seconds per named phase
        self.shared = shared
        self.config = config
        self.up_to_date = False
        self.watch_lock = threading.Lock()
        self.watch_blocks = []            # sessions subscribed to numblocks
        self.watch_headers = []           # sessions subscribed to headers
        self.watched_addresses = {}       # address -> [sessions]
        self.history_cache = {}           # address -> history list
        self.max_cache_size = 100000
        self.chunk_cache = {}             # chunk index -> hex-encoded 2016 headers
        self.cache_lock = threading.Lock()
        self.headers_data = ''            # pending, not-yet-flushed header bytes
        self.headers_path = config.get('leveldb', 'path')
        self.mempool_values = {}          # txid -> list of output values
        self.mempool_addresses = {}       # txid -> {address: value delta}
        self.mempool_hist = {}            # address -> [(txid, delta)]
        self.mempool_hashes = set([])
        self.mempool_lock = threading.Lock()
        self.address_queue = Queue()      # (address, sessions) pending notification
        try:
            self.test_reorgs = config.getboolean('leveldb', 'test_reorgs')  # simulate random blockchain reorgs
        except:
            self.test_reorgs = False
        self.storage = Storage(config, shared, self.test_reorgs)
        self.dblock = threading.Lock()
        # JSON-RPC endpoint of the daemon (config section is named 'bitcoind'
        # for historical reasons).
        self.reecoind_url = 'http://%s:%s@%s:%s/' % (
            config.get('bitcoind', 'bitcoind_user'),
            config.get('bitcoind', 'bitcoind_password'),
            config.get('bitcoind', 'bitcoind_host'),
            config.get('bitcoind', 'bitcoind_port'))
        self.sent_height = 0
        self.sent_header = None
        # catch_up headers
        self.init_headers(self.storage.height)
        self.blockchain_thread = threading.Thread(target = self.do_catch_up)
        self.blockchain_thread.start()

    def do_catch_up(self):
        """Background thread: sync to the daemon tip, then loop every 10s."""
        self.header = self.block2header(self.reecoind('getblock', [self.storage.last_hash]))
        self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
        self.catch_up(sync=False)
        print_log("Blockchain is up to date.")
        self.memorypool_update()
        print_log("Memory pool initialized.")
        while not self.shared.stopped():
            self.main_iteration()
            if self.shared.paused():
                print_log("reecoind is responding")
                self.shared.unpause()
            time.sleep(10)

    def mtime(self, name):
        """Accumulate elapsed wall time since the previous call under `name`.

        Call mtime('') to reset the reference point without recording.
        """
        now = time.time()
        if name != '':
            delta = now - self.now
            t = self.mtimes.get(name, 0)
            self.mtimes[name] = t + delta
        self.now = now

    def print_mtime(self):
        """Log all accumulated phase timings on one line."""
        s = ''
        for k, v in self.mtimes.items():
            s += k+':'+"%.2f"%v+' '
        print_log(s)

    def reecoind(self, method, params=[]):
        """Synchronous JSON-RPC call to the daemon; retries until reachable.

        Pauses the shared state while the daemon is down; re-raises from the
        retry loop when a shutdown is requested so the calling thread ends.
        """
        postdata = dumps({"method": method, 'params': params, 'id': 'jsonrpc'})
        while True:
            try:
                respdata = urllib.urlopen(self.reecoind_url, postdata).read()
                break
            except:
                print_log("cannot reach reecoind...")
                self.shared.pause()
                time.sleep(10)
                if self.shared.stopped():
                    # this will end the thread
                    raise
                continue
        r = loads(respdata)
        if r['error'] is not None:
            raise BaseException(r['error'])
        return r.get('result')

    def block2header(self, b):
        """Convert a daemon 'getblock' dict into the compact header dict."""
        return {
            "block_height": b.get('height'),
            "version": b.get('version'),
            "prev_block_hash": b.get('previousblockhash'),
            "merkle_root": b.get('merkleroot'),
            "timestamp": b.get('time'),
            "bits": int(b.get('bits'), 16),
            "nonce": b.get('nonce'),
        }

    def get_header(self, height):
        """Fetch the header dict for `height` from the daemon."""
        block_hash = self.reecoind('getblockhash', [height])
        b = self.reecoind('getblock', [block_hash])
        return self.block2header(b)

    def init_headers(self, db_height):
        """Create/extend the flat 80-byte-record headers file up to db_height."""
        self.chunk_cache = {}
        self.headers_filename = os.path.join(self.headers_path, 'blockchain_headers')
        if os.path.exists(self.headers_filename):
            height = os.path.getsize(self.headers_filename)/80 - 1  # the current height
            if height > 0:
                prev_hash = self.hash_header(self.read_header(height))
            else:
                prev_hash = None
        else:
            open(self.headers_filename, 'wb').close()
            prev_hash = None
            height = -1
        if height < db_height:
            print_log("catching up missing headers:", height, db_height)
        try:
            while height < db_height:
                height += 1
                header = self.get_header(height)
                if height > 1:
                    if prev_hash != header.get('prev_block_hash'):
                        # The prev_hash block is orphaned, go back
                        print_log("reorganizing, a block in file is orphaned:", prev_hash)
                        # Go to the parent of the orphaned block
                        height -= 2
                        prev_hash = self.hash_header(self.read_header(height))
                        continue
                self.write_header(header, sync=False)
                prev_hash = self.hash_header(header)
                if (height % 1000) == 0:
                    print_log("headers file:", height)
        except KeyboardInterrupt:
            self.flush_headers()
            sys.exit()
        self.flush_headers()

    def hash_header(self, header):
        """Return the X11 hash of a header dict as a reversed hex string."""
        return rev_hex(HashX11(header_to_string(header).decode('hex')).encode('hex'))

    def read_header(self, block_height):
        """Read one 80-byte header record from disk; None when unavailable."""
        if os.path.exists(self.headers_filename):
            with open(self.headers_filename, 'rb') as f:
                f.seek(block_height * 80)
                h = f.read(80)
            if len(h) == 80:
                h = header_from_string(h)
                return h

    def read_chunk(self, index):
        """Return chunk `index` (2016 headers) as a hex string."""
        with open(self.headers_filename, 'rb') as f:
            f.seek(index*2016*80)
            chunk = f.read(2016*80)
        return chunk.encode('hex')

    def write_header(self, header, sync=True):
        """Append a header to the pending buffer, flushing when large or sync.

        Also invalidates the cached chunk containing this height.
        """
        if not self.headers_data:
            self.headers_offset = header.get('block_height')
        self.headers_data += header_to_string(header).decode('hex')
        if sync or len(self.headers_data) > 40*100:
            self.flush_headers()
        with self.cache_lock:
            chunk_index = header.get('block_height')/2016
            if self.chunk_cache.get(chunk_index):
                self.chunk_cache.pop(chunk_index)

    def pop_header(self):
        """Drop the last buffered header (used when reverting a block)."""
        # we need to do this only if we have not flushed
        if self.headers_data:
            self.headers_data = self.headers_data[:-40]

    def flush_headers(self):
        """Write the pending header bytes to their offset in the file."""
        if not self.headers_data:
            return
        with open(self.headers_filename, 'rb+') as f:
            f.seek(self.headers_offset*80)
            f.write(self.headers_data)
        self.headers_data = ''

    def get_chunk(self, i):
        """Return chunk `i`, serving from the in-memory cache when possible."""
        # store them on disk; store the current chunk in memory
        with self.cache_lock:
            chunk = self.chunk_cache.get(i)
            if not chunk:
                chunk = self.read_chunk(i)
                self.chunk_cache[i] = chunk
        return chunk

    def get_mempool_transaction(self, txid):
        """Fetch and deserialize a mempool tx; None on fetch/parse failure."""
        try:
            raw_tx = self.reecoind('getrawtransaction', [txid, 0])
        except:
            return None
        vds = deserialize.BCDataStream()
        vds.write(raw_tx.decode('hex'))
        try:
            return deserialize.parse_Transaction(vds, is_coinbase=False)
        except:
            print_log("ERROR: cannot parse", txid)
            return None

    def get_history(self, addr, cache_only=False):
        """Return the confirmed+mempool history of an address.

        Returns -1 when cache_only is set and the address is not cached.
        """
        with self.cache_lock:
            hist = self.history_cache.get(addr)
        if hist is not None:
            return hist
        if cache_only:
            return -1
        with self.dblock:
            hist = self.storage.get_history(addr)
        # add memory pool
        with self.mempool_lock:
            for txid, delta in self.mempool_hist.get(addr, []):
                hist.append({'tx_hash':txid, 'height':0})
        with self.cache_lock:
            if len(self.history_cache) > self.max_cache_size:
                logger.info("clearing cache")
                self.history_cache.clear()
            self.history_cache[addr] = hist
        return hist

    def get_unconfirmed_value(self, addr):
        """Sum of the mempool value deltas affecting `addr`."""
        v = 0
        with self.mempool_lock:
            for txid, delta in self.mempool_hist.get(addr, []):
                v += delta
        return v

    def get_status(self, addr, cache_only=False):
        """Return the Electrum status hash of an address history (or None/'*'/-1)."""
        tx_points = self.get_history(addr, cache_only)
        if cache_only and tx_points == -1:
            return -1
        if not tx_points:
            return None
        if tx_points == ['*']:
            return '*'
        status = ''
        for tx in tx_points:
            status += tx.get('tx_hash') + ':%d:' % tx.get('height')
        return hashlib.sha256(status).digest().encode('hex')

    def get_merkle(self, tx_hash, height):
        """Compute the merkle branch of `tx_hash` within its block."""
        block_hash = self.reecoind('getblockhash', [height])
        b = self.reecoind('getblock', [block_hash])
        tx_list = b.get('tx')
        tx_pos = tx_list.index(tx_hash)
        merkle = map(hash_decode, tx_list)
        target_hash = hash_decode(tx_hash)
        s = []
        while len(merkle) != 1:
            if len(merkle) % 2:
                # odd level: duplicate the last hash, as Bitcoin does
                merkle.append(merkle[-1])
            n = []
            while merkle:
                new_hash = Hash(merkle[0] + merkle[1])
                if merkle[0] == target_hash:
                    s.append(hash_encode(merkle[1]))
                    target_hash = new_hash
                elif merkle[1] == target_hash:
                    s.append(hash_encode(merkle[0]))
                    target_hash = new_hash
                n.append(new_hash)
                merkle = merkle[2:]
            merkle = n
        return {"block_height": height, "merkle": s, "pos": tx_pos}

    def add_to_history(self, addr, tx_hash, tx_pos, tx_height):
        """Insert an 80-byte history record for addr, kept sorted by height.

        NOTE(review): appears unused by the visible code paths (Storage handles
        history in import_block) — possibly legacy; confirm before removing.
        """
        # keep it sorted
        s = self.serialize_item(tx_hash, tx_pos, tx_height) + 40*chr(0)
        assert len(s) == 80
        serialized_hist = self.batch_list[addr]
        l = len(serialized_hist)/80
        for i in range(l-1, -1, -1):
            item = serialized_hist[80*i:80*(i+1)]
            item_height = int(rev_hex(item[36:39].encode('hex')), 16)
            if item_height <= tx_height:
                serialized_hist = serialized_hist[0:80*(i+1)] + s + serialized_hist[80*(i+1):]
                break
        else:
            serialized_hist = s + serialized_hist
        self.batch_list[addr] = serialized_hist
        # backlink
        txo = (tx_hash + int_to_hex(tx_pos, 4)).decode('hex')
        self.batch_txio[txo] = addr

    def deserialize_block(self, block):
        """Parse all raw txs of a block; returns (ordered txids, txid->tx)."""
        txlist = block.get('tx')
        tx_hashes = []  # ordered txids
        txdict = {}     # deserialized tx
        is_coinbase = True
        for raw_tx in txlist:
            tx_hash = hash_encode(Hash(raw_tx.decode('hex')))
            vds = deserialize.BCDataStream()
            vds.write(raw_tx.decode('hex'))
            try:
                tx = deserialize.parse_Transaction(vds, is_coinbase)
            except:
                print_log("ERROR: cannot parse", tx_hash)
                continue
            tx_hashes.append(tx_hash)
            txdict[tx_hash] = tx
            is_coinbase = False  # only the first tx of a block is coinbase
        return tx_hashes, txdict

    def import_block(self, block, block_hash, block_height, sync, revert=False):
        """Apply (or with revert=True, undo) a full block in storage."""
        touched_addr = set([])
        # deserialize transactions
        tx_hashes, txdict = self.deserialize_block(block)
        # undo info
        if revert:
            undo_info = self.storage.get_undo_info(block_height)
            tx_hashes.reverse()
        else:
            undo_info = {}
        for txid in tx_hashes:  # must be ordered
            tx = txdict[txid]
            if not revert:
                undo = self.storage.import_transaction(txid, tx, block_height, touched_addr)
                undo_info[txid] = undo
            else:
                undo = undo_info.pop(txid)
                self.storage.revert_transaction(txid, tx, block_height, touched_addr, undo)
        if revert:
            assert undo_info == {}
        # add undo info
        if not revert:
            self.storage.write_undo_info(block_height, self.reecoind_height, undo_info)
        # add the max
        self.storage.db_undo.put('height', repr( (block_hash, block_height, self.storage.db_version) ))
        for addr in touched_addr:
            self.invalidate_cache(addr)
        self.storage.update_hashes()

    def add_request(self, session, request):
        """Answer from cache immediately, or queue the request for a worker."""
        # see if we can get if from cache. if not, add request to queue
        message_id = request.get('id')
        try:
            result = self.process(request, cache_only=True)
        except BaseException as e:
            self.push_response(session, {'id': message_id, 'error': str(e)})
            return
        if result == -1:
            self.queue.put((session, request))
        else:
            self.push_response(session, {'id': message_id, 'result': result})

    def do_subscribe(self, method, params, session):
        """Register a session for block/header/address notifications."""
        with self.watch_lock:
            if method == 'blockchain.numblocks.subscribe':
                if session not in self.watch_blocks:
                    self.watch_blocks.append(session)
            elif method == 'blockchain.headers.subscribe':
                if session not in self.watch_headers:
                    self.watch_headers.append(session)
            elif method == 'blockchain.address.subscribe':
                address = params[0]
                l = self.watched_addresses.get(address)
                if l is None:
                    self.watched_addresses[address] = [session]
                elif session not in l:
                    l.append(session)

    def do_unsubscribe(self, method, params, session):
        """Remove a session from the notification lists."""
        with self.watch_lock:
            if method == 'blockchain.numblocks.subscribe':
                if session in self.watch_blocks:
                    self.watch_blocks.remove(session)
            elif method == 'blockchain.headers.subscribe':
                if session in self.watch_headers:
                    self.watch_headers.remove(session)
            elif method == "blockchain.address.subscribe":
                addr = params[0]
                l = self.watched_addresses.get(addr)
                if not l:
                    return
                if session in l:
                    l.remove(session)
                if session in l:
                    # session registered twice would indicate a bookkeeping bug
                    print_log("error rc!!")
                    self.shared.stop()
                if l == []:
                    self.watched_addresses.pop(addr)

    def process(self, request, cache_only=False):
        """Dispatch one 'blockchain.*' JSON-RPC request and return its result.

        With cache_only=True, returns -1 for anything not answerable from
        cache so the caller can defer it to the worker queue.
        """
        message_id = request['id']
        method = request['method']
        params = request.get('params', [])
        result = None
        error = None
        if method == 'blockchain.numblocks.subscribe':
            result = self.storage.height
        elif method == 'blockchain.headers.subscribe':
            result = self.header
        elif method == 'blockchain.address.subscribe':
            address = str(params[0])
            result = self.get_status(address, cache_only)
        elif method == 'blockchain.address.get_history':
            address = str(params[0])
            result = self.get_history(address, cache_only)
        elif method == 'blockchain.address.get_mempool':
            address = str(params[0])
            result = self.get_unconfirmed_history(address, cache_only)
        elif method == 'blockchain.address.get_balance':
            address = str(params[0])
            confirmed = self.storage.get_balance(address)
            unconfirmed = self.get_unconfirmed_value(address)
            result = { 'confirmed':confirmed, 'unconfirmed':unconfirmed }
        elif method == 'blockchain.address.get_proof':
            address = str(params[0])
            result = self.storage.get_proof(address)
        elif method == 'blockchain.address.listunspent':
            address = str(params[0])
            result = self.storage.listunspent(address)
        elif method == 'blockchain.utxo.get_address':
            txid = str(params[0])
            pos = int(params[1])
            txi = (txid + int_to_hex(pos, 4)).decode('hex')
            result = self.storage.get_address(txi)
        elif method == 'blockchain.block.get_header':
            if cache_only:
                result = -1
            else:
                height = int(params[0])
                result = self.get_header(height)
        elif method == 'blockchain.block.get_chunk':
            if cache_only:
                result = -1
            else:
                index = int(params[0])
                result = self.get_chunk(index)
        elif method == 'blockchain.transaction.broadcast':
            try:
                txo = self.reecoind('sendrawtransaction', params)
                print_log("sent tx:", txo)
                result = txo
            except BaseException, e:
                result = str(e)  # do not send an error
                print_log("error:", result, params)
        elif method == 'blockchain.transaction.get_merkle':
            if cache_only:
                result = -1
            else:
                tx_hash = params[0]
                tx_height = params[1]
                result = self.get_merkle(tx_hash, tx_height)
        elif method == 'blockchain.transaction.get':
            tx_hash = params[0]
            result = self.reecoind('getrawtransaction', [tx_hash, 0])
        elif method == 'blockchain.estimatefee':
            num = int(params[0])
            result = self.reecoind('estimatefee', [num])
        else:
            raise BaseException("unknown method:%s" % method)
        if cache_only and result == -1:
            return -1
        return result

    def getfullblock(self, block_hash):
        """Fetch a block plus all of its raw transactions (batched JSON-RPC)."""
        block = self.reecoind('getblock', [block_hash])
        rawtxreq = []
        i = 0
        for txid in block['tx']:
            rawtxreq.append({
                "method": "getrawtransaction",
                "params": [txid],
                "id": i,
            })
            i += 1
        postdata = dumps(rawtxreq)
        try:
            respdata = urllib.urlopen(self.reecoind_url, postdata).read()
        except:
            logger.error("reecoind error (getfullblock)",exc_info=True)
            self.shared.stop()
        r = loads(respdata)
        rawtxdata = []
        for ir in r:
            if ir['error'] is not None:
                self.shared.stop()
                print_log("Error: make sure you run reecoind with txindex=1; use -reindex if needed.")
                raise BaseException(ir['error'])
            rawtxdata.append(ir['result'])
        block['tx'] = rawtxdata
        return block

    def catch_up(self, sync=True):
        """Import blocks until storage matches the daemon tip, handling reorgs."""
        prev_root_hash = None
        while not self.shared.stopped():
            self.mtime('')
            # are we done yet?
            info = self.reecoind('getinfo')
            self.reecoind_height = info.get('blocks')
            reecoind_block_hash = self.reecoind('getblockhash', [self.reecoind_height])
            if self.storage.last_hash == reecoind_block_hash:
                self.up_to_date = True
                break
            # fixme: this is unsafe, if we revert when the undo info is not yet written
            revert = (random.randint(1, 100) == 1) if self.test_reorgs else False
            # not done..
            self.up_to_date = False
            try:
                next_block_hash = self.reecoind('getblockhash', [self.storage.height + 1])
                next_block = self.getfullblock(next_block_hash)
            except BaseException, e:
                revert = True
                next_block = self.getfullblock(self.storage.last_hash)
            self.mtime('daemon')
            if (next_block.get('previousblockhash') == self.storage.last_hash) and not revert:
                # normal case: the next block extends our chain
                prev_root_hash = self.storage.get_root_hash()
                self.import_block(next_block, next_block_hash, self.storage.height+1, sync)
                self.storage.height = self.storage.height + 1
                self.write_header(self.block2header(next_block), sync)
                self.storage.last_hash = next_block_hash
                self.mtime('import')
                if self.storage.height % 1000 == 0 and not sync:
                    t_daemon = self.mtimes.get('daemon')
                    t_import = self.mtimes.get('import')
                    print_log("catch_up: block %d (%.3fs %.3fs)" % (self.storage.height, t_daemon, t_import), self.storage.get_root_hash().encode('hex'))
                    self.mtimes['daemon'] = 0
                    self.mtimes['import'] = 0
            else:
                # revert current block
                block = self.getfullblock(self.storage.last_hash)
                print_log("blockchain reorg", self.storage.height, block.get('previousblockhash'), self.storage.last_hash)
                self.import_block(block, self.storage.last_hash, self.storage.height, sync, revert=True)
                self.pop_header()
                self.flush_headers()
                self.storage.height -= 1
                # read previous header from disk
                self.header = self.read_header(self.storage.height)
                self.storage.last_hash = self.hash_header(self.header)
                if prev_root_hash:
                    # sanity check: after undoing the block the UTXO root must match
                    assert prev_root_hash == self.storage.get_root_hash()
                    prev_root_hash = None
        self.header = self.block2header(self.reecoind('getblock', [self.storage.last_hash]))
        self.header['utxo_root'] = self.storage.get_root_hash().encode('hex')
        if self.shared.stopped():
            print_log( "closing database" )
            self.storage.close()

    def memorypool_update(self):
        """Refresh mempool bookkeeping from the daemon and rebuild histories."""
        mempool_hashes = set(self.reecoind('getrawmempool'))
        touched_addresses = set([])
        # get new transactions
        new_tx = {}
        for tx_hash in mempool_hashes:
            if tx_hash in self.mempool_hashes:
                continue
            tx = self.get_mempool_transaction(tx_hash)
            if not tx:
                continue
            new_tx[tx_hash] = tx
            self.mempool_hashes.add(tx_hash)
        # remove older entries from mempool_hashes
        self.mempool_hashes = mempool_hashes
        # check all tx outputs
        for tx_hash, tx in new_tx.items():
            mpa = self.mempool_addresses.get(tx_hash, {})
            out_values = []
            for x in tx.get('outputs'):
                out_values.append( x['value'] )
                addr = x.get('address')
                if not addr:
                    continue
                v = mpa.get(addr,0)
                v += x['value']
                mpa[addr] = v
                touched_addresses.add(addr)
            self.mempool_addresses[tx_hash] = mpa
            self.mempool_values[tx_hash] = out_values
        # check all inputs
        for tx_hash, tx in new_tx.items():
            mpa = self.mempool_addresses.get(tx_hash, {})
            for x in tx.get('inputs'):
                # we assume that the input address can be parsed by deserialize(); this is true for Electrum transactions
                addr = x.get('address')
                if not addr:
                    continue
                v = self.mempool_values.get(x.get('prevout_hash'))
                if v:
                    value = v[ x.get('prevout_n')]
                else:
                    txi = (x.get('prevout_hash') + int_to_hex(x.get('prevout_n'), 4)).decode('hex')
                    try:
                        value = self.storage.get_utxo_value(addr,txi)
                    except:
                        print_log("utxo not in database; postponing mempool update")
                        return
                v = mpa.get(addr,0)
                v -= value
                mpa[addr] = v
                touched_addresses.add(addr)
            self.mempool_addresses[tx_hash] = mpa
        # remove deprecated entries from mempool_addresses
        for tx_hash, addresses in self.mempool_addresses.items():
            if tx_hash not in self.mempool_hashes:
                self.mempool_addresses.pop(tx_hash)
                self.mempool_values.pop(tx_hash)
                for addr in addresses:
                    touched_addresses.add(addr)
        # rebuild mempool histories
        new_mempool_hist = {}
        for tx_hash, addresses in self.mempool_addresses.items():
            for addr, delta in addresses.items():
                h = new_mempool_hist.get(addr, [])
                if tx_hash not in h:
                    h.append((tx_hash, delta))
                new_mempool_hist[addr] = h
        with self.mempool_lock:
            self.mempool_hist = new_mempool_hist
        # invalidate cache for touched addresses
        for addr in touched_addresses:
            self.invalidate_cache(addr)

    def invalidate_cache(self, address):
        """Drop the cached history of `address` and queue client notifications."""
        with self.cache_lock:
            if address in self.history_cache:
                print_log("cache: invalidating", address)
                self.history_cache.pop(address)
        with self.watch_lock:
            sessions = self.watched_addresses.get(address)
        if sessions:
            # TODO: update cache here. if new value equals cached value, do not send notification
            self.address_queue.put((address,sessions))

    def close(self):
        """Wait for the sync thread, then close the database."""
        self.blockchain_thread.join()
        print_log("Closing database...")
        self.storage.close()
        print_log("Database is closed")

    def main_iteration(self):
        """One sync cycle: catch up, refresh mempool, push notifications."""
        if self.shared.stopped():
            print_log("Stopping timer")
            return
        with self.dblock:
            t1 = time.time()
            self.catch_up()
            t2 = time.time()
        self.memorypool_update()
        # notify height subscribers
        if self.sent_height != self.storage.height:
            self.sent_height = self.storage.height
            for session in self.watch_blocks:
                self.push_response(session, {
                    'id': None,
                    'method': 'blockchain.numblocks.subscribe',
                    'params': [self.storage.height],
                })
        # notify header subscribers
        if self.sent_header != self.header:
            print_log("blockchain: %d (%.3fs)" % (self.storage.height, t2 - t1))
            self.sent_header = self.header
            for session in self.watch_headers:
                self.push_response(session, {
                    'id': None,
                    'method': 'blockchain.headers.subscribe',
                    'params': [self.header],
                })
        # notify address subscribers queued by invalidate_cache()
        while True:
            try:
                addr, sessions = self.address_queue.get(False)
            except:
                break
            status = self.get_status(addr)
            for session in sessions:
                self.push_response(session, {
                    'id': None,
                    'method': 'blockchain.address.subscribe',
                    'params': [addr, status],
                })
|
agpl-3.0
| -8,479,303,598,396,417,000
| 32.688406
| 153
| 0.525203
| false
| 4.118411
| false
| false
| false
|
ericmjl/bokeh
|
bokeh/core/property/string.py
|
1
|
3831
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the Regex property.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import base64
import re
# Bokeh imports
from .primitive import String
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Regex',
'Base64String',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Regex(String):
''' Accept strings that match a given regular expression.
Args:
default (string or None, optional) :
A default value for attributes created from this property to
have (default: None)
help (str or None, optional) :
A documentation string for this property. It will be automatically
used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
generating Spinx documentation. (default: None)
serialized (bool, optional) :
Whether attributes created from this property should be included
in serialization (default: True)
readonly (bool, optional) :
Whether attributes created from this property are read-only.
(default: False)
Example:
.. code-block:: python
>>> class RegexModel(HasProps):
... prop = Regex("foo[0-9]+bar")
...
>>> m = RegexModel()
>>> m.prop = "foo123bar"
>>> m.prop = "foo" # ValueError !!
>>> m.prop = [1, 2, 3] # ValueError !!
'''
def __init__(self, regex, default=None, help=None):
self.regex = re.compile(regex)
super().__init__(default=default, help=help)
def __str__(self):
return "%s(%r)" % (self.__class__.__name__, self.regex.pattern)
def validate(self, value, detail=True):
super().validate(value, detail)
if not (value is None or self.regex.match(value) is not None):
msg = "" if not detail else "expected a string matching %r pattern, got %r" % (self.regex.pattern, value)
raise ValueError(msg)
class Base64String(String):
def serialize_value(self, value):
''' Encode a ascii string using Base64.
Args:
value : a string to encode
Returns:
string
'''
if isinstance(value, str):
value = base64.b64encode(value.encode("utf-8")).decode("utf-8")
return value
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
bsd-3-clause
| 7,637,204,481,124,638,000
| 30.661157
| 117
| 0.394936
| false
| 5.848855
| false
| false
| false
|
jacobzelek/ffprobe
|
ffprobe/ffprobe.py
|
1
|
12463
|
#!/usr/bin/python
# Filename: ffprobe.py
"""
Python wrapper for ffprobe command line tool. ffprobe must exist in the path or in a common installation path
"""
version = '0.4'
import subprocess
import re
import os
import sys
from os import listdir
from os.path import isfile, join
import json
import mimetypes
class FFProbe(object):
    """
    FFProbe wraps the ffprobe command and pulls the data into an object form::
        metadata = FFProbe('multimedia-file.mov')
    OR
        metadata = FFProbe(file_contents)
    OR
        metadata = FFProbe('multimedia-file.mov', ffprobe_path='/usr/local/bin/ffprobe')
    """
    def __init__(self, source, ffprobe_path=None):
        """Probe `source` (a path, or an open file passed via stdin) with ffprobe.

        Raises IOError when the ffprobe executable cannot be located or when
        it exits with a non-zero status.
        """
        ffprobe_cmd = None
        if ffprobe_path is not None and os.path.exists(ffprobe_path):
            ffprobe_cmd = ffprobe_path
        else:
            ffprobe_cmd = os.environ.get('FFPROBE', 'ffprobe')
        try:
            # Cheap availability check: run "ffprobe -h" and discard output.
            with open(os.devnull, 'w') as tempf:
                subprocess.check_call([ffprobe_cmd, "-h"], stdout=tempf,
                                      stderr=tempf)
        except:
            # BUG FIX: `platform` was referenced below but never imported, so
            # this fallback always crashed with NameError instead of probing
            # the well-known install locations.
            import platform
            paths = {
                "Windows": ["ffprobe.exe"],
                "Darwin": ["ffprobe", "/opt/local/bin/ffprobe", "/usr/local/bin/ffprobe"],
                "Linux": ["ffprobe", "/opt/local/bin/ffprobe", "/usr/local/bin/ffprobe"]
            }
            # Find path of transcoder
            found = False
            for path in paths[platform.system()]:
                if os.path.exists(path):
                    ffprobe_cmd = path
                    found = True
            if not found:
                raise IOError('ffprobe not found')
        self.streams = []
        self.video = []
        self.audio = []
        self.duration = 0.0
        self.mimetype = None
        self.returncode = None
        # If source is file and it exists the use path, otherwise
        # open file and send contents to ffprobe through stdin
        DEVNULL = open(os.devnull, 'wb')
        args = [ffprobe_cmd, "-show_streams", "-print_format", "json", "-show_format", "-i"]
        if os.path.isfile(source):
            try:
                type, encoding = mimetypes.guess_type(source)
                self.mimetype = type
            except:
                pass
            args.append(source)
            proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=DEVNULL)
        else:
            args.append("-")
            proc = subprocess.Popen(args, stdin=source, stdout=subprocess.PIPE, stderr=DEVNULL)
        raw_out = ""
        while self.returncode is None:
            for line in proc.stdout:
                raw_out += line
            self.returncode = proc.poll()
        proc.stdout.close()
        DEVNULL.close()  # FIX: previously leaked the null-sink file handle
        if self.returncode != 0:
            raise IOError('ffprobe failed')
        json_out = json.loads(raw_out)
        # Lift every "format" key (duration, bit_rate, ...) onto this object.
        for key in json_out["format"]:
            self.__dict__[key] = json_out["format"][key]
        for stream in json_out["streams"]:
            self.streams.append(FFStream(stream))
        for stream in self.streams:
            # Streams without their own duration inherit the container's.
            if stream.isAudio() or stream.isVideo():
                if "duration" not in stream.__dict__ or stream.__dict__["duration"] == 0.0:
                    stream.__dict__["duration"] = self.duration
            if stream.isAudio():
                self.audio.append(stream)
            if stream.isVideo():
                self.video.append(stream)

    # @todo If mp4 extension but no video stream then set mimetype to audio/mp4
    # @todo Needs to follow http://tools.ietf.org/html/rfc6381
    # @todo Need to add mp4v and mp4a (aac)
    def html5SourceType(self):
        """Return an HTML5 <source type="..."> string (mimetype + codecs)."""
        string = ''
        if self.mimetype is not None:
            if self.mimetype == 'audio/mpeg':
                return self.mimetype
            string += self.mimetype
        video = None
        audio = None
        if len(self.video) > 0:
            video = self.video[0]
        if len(self.audio) > 0:
            audio = self.audio[0]
        if video is not None or audio is not None:
            string += '; codecs="'
            codecs = []
            if video is not None:
                if video.codec() == 'h264':
                    # Build an RFC 6381 avc1.PPCCLL codec id from profile+level.
                    codec = 'avc1.'
                    profile = video.__dict__["profile"]
                    if profile == 'High':
                        codec += '6400'
                    elif profile == 'Baseline':
                        codec += '42E0'
                    elif profile == 'Constrained Baseline':
                        codec += '42E0'
                    elif profile == 'Main':
                        codec += '4D40'
                    elif profile == 'Extended':
                        codec += '58A0'
                    codec += hex(int(video.__dict__["level"]))[2:].upper()
                    codecs.append(codec)
                else:
                    codecs.append(video.codec())
            if audio is not None:
                if audio.codec() == 'aac':
                    codecs.append('mp4a.40.2')
                else:
                    codecs.append(audio.codec())
            string += ', '.join(codecs)
            string += '"'
        return string

    def durationSeconds(self):
        """
        Returns the runtime duration of the file as a floating point number of seconds.
        Returns 0.0 if value not found
        """
        f = 0.0
        if 'duration' in self.__dict__:
            try:
                f = float(self.__dict__['duration'])
            except Exception as e:
                pass
        return f

    def bitrate(self):
        """
        Returns bitrate as an integer in bps
        """
        b = 0
        if 'bit_rate' in self.__dict__:
            try:
                b = int(self.__dict__['bit_rate'])
            except Exception as e:
                pass
        return b
class FFStream(object):
"""
An object representation of an individual stream in a multimedia file.
"""
def __init__(self, obj):
for key in obj.keys():
self.__dict__[key] = obj[key]
def isData(self):
"""
Is this stream labelled as an data stream?
"""
val = False
if 'codec_type' in self.__dict__:
if str(self.__dict__['codec_type']) == 'data':
val = True
return val
def isAudio(self):
"""
Is this stream labelled as an audio stream?
"""
val = False
if 'codec_type' in self.__dict__:
if str(self.__dict__['codec_type']) == 'audio':
val = True
return val
def isVideo(self):
"""
Is the stream labelled as a video stream.
"""
val = False
if 'codec_type' in self.__dict__:
if self.codec_type == 'video':
val = True
return val
def isSubtitle(self):
"""
Is the stream labelled as a subtitle stream.
"""
val = False
if 'codec_type' in self.__dict__:
if str(self.codec_type)=='subtitle':
val = True
return val
def frameSize(self):
"""
Returns the pixel frame size as an integer tuple (width,height) if the stream is a video stream.
Returns None if it is not a video stream.
"""
size = None
if self.isVideo():
if 'width' in self.__dict__ and 'height' in self.__dict__:
try:
size = (int(self.__dict__['width']),int(self.__dict__['height']))
except Exception as e:
pass
size = (0,0)
return size
def pixelFormat(self):
"""
Returns a string representing the pixel format of the video stream. e.g. yuv420p.
Returns none is it is not a video stream.
"""
f = None
if self.isVideo():
if 'pix_fmt' in self.__dict__:
f = self.__dict__['pix_fmt']
return f
def frames(self):
"""
Returns the length of a video stream in frames. Returns 0 if not a video stream.
"""
f = 0
if self.isVideo() or self.isAudio():
if 'nb_frames' in self.__dict__:
try:
f = int(self.__dict__['nb_frames'])
except Exception as e:
pass
return f
def durationSeconds(self):
"""
Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream.
"""
f = 0.0
if self.isVideo() or self.isAudio():
if 'duration' in self.__dict__:
try:
f = float(self.__dict__['duration'])
except Exception as e:
pass
return f
def language(self):
"""
Returns language tag of stream. e.g. eng
"""
lang = None
if 'TAG:language' in self.__dict__:
lang = self.__dict__['TAG:language']
return lang
def codec(self):
"""
Returns a string representation of the stream codec.
"""
codec_name = None
if 'codec_name' in self.__dict__:
codec_name = self.__dict__['codec_name']
return codec_name
def codecDescription(self):
"""
Returns a long representation of the stream codec.
"""
codec_d = None
if 'codec_long_name' in self.__dict__:
codec_d = self.__dict__['codec_long_name']
return codec_d
def codecTag(self):
"""
Returns a short representative tag of the stream codec.
"""
codec_t = None
if 'codec_tag_string' in self.__dict__:
codec_t = self.__dict__['codec_tag_string']
return codec_t
def bitrate(self):
"""
Returns bitrate as an integer in bps
"""
b = 0
if 'bit_rate' in self.__dict__:
try:
b = int(self.__dict__['bit_rate'])
except Exception as e:
pass
return b
	def frameRate(self):
		"""
		Returns the framerate as a float in frames/second.
		Prefers the probe's r_frame_rate fraction; falls back to
		nb_frames/duration; returns 0.0 for non-video streams or on
		any parse failure.
		"""
		f = 0.0
		if 'codec_type' in self.__dict__:
			if str(self.__dict__['codec_type']) == 'video':
				try:
					if 'r_frame_rate' in self.__dict__:
						# r_frame_rate is a fraction string like "30000/1001".
						values = self.__dict__['r_frame_rate']
						values = values.split('/')
						try:
							f = float(values[0])/float(values[1])
						except Exception as e:
							# Malformed fraction (or division by zero): keep 0.0.
							pass
					else:
						# Fallback: average rate over the whole stream.
						if 'nb_frames' in self.__dict__ and 'duration' in self.__dict__:
							try:
								f = float(self.__dict__['nb_frames'])/float(self.__dict__['duration'])
							except Exception as e:
								pass
				except Exception as e:
					# Defensive outer guard; keeps the default 0.0.
					pass
		return f
def printMeta(path):
m = FFProbe(path)
name = os.path.split(path)[1]
stream_count = 1
for s in m.streams:
type = "Video" if s.isVideo else "Audio"
print "[ %s - Stream #%s - %s ]" % (name, stream_count, type)
stream_count += 1
if s.isVideo():
print "Framerate: %f" % s.frameRate()
print "Frames: %i" % s.frames()
print "Width: %i" % s.frameSize()[0]
print "Height: %i" % s.frameSize()[1]
print "Duration: %f" % s.durationSeconds()
print "Bitrate: %i" % s.bitrate()
print ""
# CLI entry point: accept a single media file, or a directory whose media
# files are each summarised.  Python 2 print syntax.
if __name__ == '__main__':
	if len(sys.argv) == 2:
		path = sys.argv[1]
		if os.path.isfile(path):
			printMeta(path)
		elif os.path.isdir(path):
			# Skip hidden files (dotfiles).
			# NOTE(review): entries are joined as ``path + file`` below, which
			# assumes ``path`` ends with a separator -- verify for bare dirs.
			files = [ f for f in listdir(path) if isfile(join(path,f)) ]
			for file in files:
				if not file.startswith("."):
					printMeta(path + file)
		else:
			# Path exists as neither file nor directory.
			sys.exit(1)
	else:
		print "Usage: python ffprobe.py <file>|<directory>"
|
mit
| 3,546,136,138,872,157,700
| 31.371429
| 109
| 0.473482
| false
| 4.407001
| false
| false
| false
|
Frky/scat
|
src/shell/chart/alloc.py
|
1
|
6907
|
#-*- coding: utf-8 -*-
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import pylab as P
import pandas as pd
from .chart import Chart
from .entry.alloc import AllocEntry
class AllocChart(Chart):
    # Chart over the results of the "alloc" analysis: one AllocEntry per
    # log line, graded against an oracle of known allocator/deallocator
    # functions.  Python 2 print syntax throughout.
    def __init__(self, oracle, *args, **kwargs):
        # oracle: nested mapping pgm -> {"alloc"/"free": accepted results},
        # used by __ok_or_ko to grade each inferred function.
        super(AllocChart, self).__init__(*args, **kwargs)
        with open("test/coreutils.txt", "r") as f:
            self.__coreutils = [line[:-1] for line in f.readlines()]
        self._analysis = "alloc"
        self.__parse_log()
        # Flatten the per-program dict built by __parse_log into one list.
        self._data = sum(self._data.values(), list())
        self.__oracle = oracle
    def __parse_log(self):
        # Populate self._data: pgm -> [AllocEntry], one entry per
        # "pgm:..." line of the analysis log.
        if not os.path.exists(self._log):
            return
        with open(self._log, "r") as f:
            for line in f.readlines():
                pgm = line[:-1].split(":")[0]
                self._data.setdefault(pgm, list())
                entry = AllocEntry(line)
                self._data[pgm].append(entry)
    def __ok_or_ko(self, pgm, res, entry):
        # Grade a result against the oracle; returns a LaTeX marker:
        # "\checked" (correct), "\texttimes" (wrong) or "n.c." (not
        # computed / not covered by the oracle).
        if res == "None":
            return "n.c."
        try:
            if self.__oracle[pgm][entry] is not None and res in self.__oracle[pgm][entry]:
                return "\\checked"
            else:
                return "\\texttimes"
        except KeyError:
            return "n.c."
    def get(self, pgm=None):
        # All entries, or only those belonging to ``pgm``.
        if pgm is None:
            return self._data
        else:
            return filter(lambda a: a.pgm == pgm, self._data)
    def table(self):
        # Print LaTeX table rows: graded coreutils programs first (tallied
        # in ``tot``), then all non-coreutils programs, then the tally.
        tot = {
            "alloc": {
                "\\texttimes": 0,
                "n.c.": 0,
                "\\checked": 0,
            },
            "free": {
                "\\texttimes": 0,
                "n.c.": 0,
                "\\checked": 0,
            },
        }
        for e in sorted(self._data, key=lambda a:a.pgm):
            if e.pgm not in self.__coreutils:
                continue
            if e.alloc == "None":
                continue
            print "{{\\tt {}}} & {}/{} & {:.3g} & {:.3g} & {:.3g}/{:.3g} \\\\".format(
                e.pgm,
                self.__ok_or_ko(e.pgm, e.alloc, "alloc"),
                self.__ok_or_ko(e.pgm, e.free, "free"),
                e.error_rate,
                e.online,
                e.offline[0],
                e.offline[1],
            )
            tot["alloc"][self.__ok_or_ko(e.pgm, e.alloc, "alloc")] += 1
            tot["free"][self.__ok_or_ko(e.pgm, e.free, "free")] += 1
        for e in sorted(self._data, key=lambda a:a.pgm):
            if e.pgm in self.__coreutils:
                continue
            print "{{\\tt {}}} & {}/{} & {:.3g} & {:.3g} & {:.3g}/{:.3g} \\\\".format(
                e.pgm,
                self.__ok_or_ko(e.pgm, e.alloc, "alloc"),
                self.__ok_or_ko(e.pgm, e.free, "free"),
                e.error_rate,
                e.online,
                e.offline[0],
                e.offline[1],
            )
        print tot
    def table_cmp(self, other):
        # Side-by-side LaTeX comparison of this chart's non-coreutils
        # entries against the matching entries of ``other``.
        for c in sorted(self._data, key=lambda a:a.pgm):
            t = other.get(c.pgm)[0]
            if c.pgm in self.__coreutils:
                continue
            print "{{\\tt {}}} & {}/{} & {}/{} & {:.3g} & {:.3g} & {:.3g}/{:.3g} & {:.3g}/{:.3g} \\\\".format(
                c.pgm,
                self.__ok_or_ko(c.pgm, c.alloc, "alloc"),
                self.__ok_or_ko(c.pgm, c.free, "free"),
                self.__ok_or_ko(t.pgm, t.alloc, "alloc"),
                self.__ok_or_ko(t.pgm, t.free, "free"),
                c.online,
                t.online,
                c.offline[0],
                c.offline[1],
                t.offline[0],
                t.offline[1],
            )
    def draw_consistency(self):
        # Scatter-plot every entry's consistency rate, one x slot per point,
        # colour-coded by oracle grading, and save the figure to disk.
        data = dict()
        for entry in self._data:
            data.setdefault(entry.pgm, list())
            data[entry.pgm].append(entry)
        plt.figure(figsize=(12, 9))
        plt.rc('text', usetex=True)
        plt.rc('font', family='serif')
        plt.ylabel("consistency rate")
        ax = plt.subplot(111)
        ax.spines["top"].set_visible(False)
        ax.spines["bottom"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.spines["left"].set_visible(True)
        ax.get_xaxis().tick_bottom()
        ax.get_yaxis().tick_left()
        ax.xaxis.set_ticklabels([])
        plt.tick_params(axis="both", which="both", bottom="off", top="off",
                labelbottom="on", left="off", right="off", labelleft="on")
        N = 0
        for rank, (pgm, entries) in enumerate(data.items()):
            consistency_rate = map(lambda a: a.consistency, entries)
            color = Chart.generic_colors[rank % len(Chart.generic_colors)]
            # Plot each line separately with its own color, using the Tableau 20
            # color set in order.
            for e in entries:
                # Colour by (oracle grading, consistency >= 0.95 threshold).
                if self.__ok_or_ko(e.pgm, e.alloc, "alloc") == "\\checked" and \
                        self.__ok_or_ko(e.pgm, e.free, "free") == "\\checked":
                    if e.consistency >= 0.95:
                        color = Chart.colors["acc"]
                    else:
                        color = Chart.colors["fn"]
                else:
                    if e.consistency > 0.95:
                        if self.__ok_or_ko(e.pgm, e.alloc, "alloc") == "\\checked":
                            color = Chart.generic_colors[-1]
                        else:
                            print e.pgm, e.alloc, e.free
                            color = Chart.colors["tot"]
                    else:
                        color = Chart.colors["acc"]
                plt.plot(N, e.consistency, 'o', color=color, mec=color)
                N += 1
            # plt.plot(range(N, N + len(error_rate)), error_rate, 'o',
            #         lw=0, color=color, label=pgm, mec=color)
            # plt.text(N, -0.05 * (1 + ((1 + rank) % 2)), pgm, color=color, fontsize=18)
            N += 1
            # Dashed vertical separator between programs.
            if rank < len(data.keys()) - 1:
                plt.plot((N - 1, N - 1), (0, 1), '--', color="black", alpha=0.3)
        xmin, xmax = -1, N
        ymin, ymax = -0.1, 1.1
        plt.ylim(ymin, ymax)
        plt.xlim(xmin, xmax)
        # Reference lines: the 0.95 consistency threshold and the x axis.
        plt.plot([xmin, xmax], [0.95, 0.95], "-", lw=0.5, color="black", alpha=0.5)
        plt.plot([xmin, xmax], [0, 0], "-", lw=1, color="black")
        plt.savefig("test/chart/alloc_consistency.png", bbox_inches="tight")
|
mit
| -6,983,431,459,954,425,000
| 37.586592
| 110
| 0.413638
| false
| 3.774317
| false
| false
| false
|
hjoliver/cylc
|
cylc/flow/scripts/trigger.py
|
1
|
2594
|
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""cylc trigger [OPTIONS] ARGS
Manually trigger tasks.
Examples:
$ cylc trigger REG # trigger all tasks in a running workflow
$ cylc trigger REG TASK_GLOB ... # trigger some tasks in a running workflow
NOTE waiting tasks that are queue-limited will be queued if triggered, to
submit as normal when released by the queue; queued tasks will submit
immediately if triggered, even if that violates the queue limit (so you may
need to trigger a queue-limited task twice to get it to submit immediately).
"""
import os.path
from cylc.flow.network.client_factory import get_client
from cylc.flow.option_parsers import CylcOptionParser as COP
from cylc.flow.terminal import cli_function
MUTATION = '''
mutation (
$wFlows: [WorkflowID]!,
$tasks: [NamespaceIDGlob]!,
$reflow: Boolean,
) {
trigger (
workflows: $wFlows,
tasks: $tasks,
reflow: $reflow
) {
result
}
}
'''
def get_option_parser():
    """Build the command-line option parser for ``cylc trigger``."""
    arg_docs = [
        ('REG', 'Workflow name'),
        ('[TASK_GLOB ...]', 'Task matching patterns'),
    ]
    parser = COP(
        __doc__, comms=True, multitask_nocycles=True, argdoc=arg_docs)
    parser.add_option(
        "-r", "--reflow",
        help="Start a new flow from the triggered task.",
        action="store_true", default=False, dest="reflow")
    return parser
@cli_function(get_option_parser)
def main(parser, options, workflow, *task_globs):
    """CLI for "cylc trigger".

    Sends the trigger mutation for ``task_globs`` in ``workflow`` to the
    running workflow server over its GraphQL endpoint.
    """
    workflow = os.path.normpath(workflow)
    pclient = get_client(workflow, timeout=options.comms_timeout)
    mutation_kwargs = {
        'request_string': MUTATION,
        'variables': {
            'wFlows': [workflow],
            'tasks': list(task_globs),
            'reflow': options.reflow,
        }
    }
    # Fire-and-forget: the server reports the result asynchronously.
    pclient('graphql', mutation_kwargs)
if __name__ == "__main__":
    main()
|
gpl-3.0
| -334,009,959,158,072,200
| 27.822222
| 78
| 0.676561
| false
| 3.663842
| false
| false
| false
|
quarkslab/irma
|
probe/modules/custom/skeleton/plugin.py
|
1
|
2240
|
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
from datetime import datetime
from irma.common.utils.utils import timestamp
from irma.common.plugins import PluginBase
from irma.common.plugin_result import PluginResult
from irma.common.base.utils import IrmaProbeType
from irma.common.plugins.exceptions import PluginLoadError
class SkeletonPlugin(PluginBase):
    # Template probe plugin: copy this class, fill in the metadata and the
    # run() body.  It deliberately refuses to load (see verify()).
    class SkeletonResult:
        # Status codes stored in PluginResult.status.
        ERROR = -1
        FAILURE = 0
        SUCCESS = 1
    # =================
    #  plugin metadata
    # =================
    _plugin_name_ = "Skeleton"
    _plugin_display_name_ = "Skeleton Display Name"
    _plugin_author_ = "<author name>"
    _plugin_version_ = "<version>"
    _plugin_category_ = "custom"
    _plugin_description_ = "Plugin skeleton"
    _plugin_dependencies_ = []
    _mimetype_regexp = None
    # =============
    #  constructor
    # =============
    def __init__(self):
        pass
    @classmethod
    def verify(cls):
        # The skeleton must never be activated as a real probe.
        raise PluginLoadError("Skeleton plugin is not meant to be loaded")
    # ==================
    #  probe interfaces
    # ==================
    def run(self, paths):
        # Analyse ``paths`` and return a timed PluginResult; any exception
        # is reported in the result rather than propagated.
        response = PluginResult(name=type(self).plugin_display_name,
                                type=type(self).plugin_category,
                                version=None)
        try:
            started = timestamp(datetime.utcnow())
            # Replace this placeholder with the real analysis call.
            response.results = "Main analysis call here"
            stopped = timestamp(datetime.utcnow())
            response.duration = stopped - started
            response.status = self.SkeletonResult.SUCCESS
        except Exception as e:
            response.status = self.SkeletonResult.ERROR
            response.results = type(e).__name__ + " : " + str(e)
        return response
|
apache-2.0
| 5,575,655,391,303,937,000
| 30.549296
| 74
| 0.615179
| false
| 4.12523
| false
| false
| false
|
ZeitOnline/zeit.cms
|
src/zeit/cms/workingcopy/browser/preview.py
|
1
|
2690
|
import urllib2
import urlparse
import zeit.cms.browser.preview
import zeit.cms.interfaces
import zeit.connector.interfaces
import zope.component
class WorkingcopyPreview(zeit.cms.browser.preview.Preview):
    """Preview for workingcopy versions of content objects.
    This supports two modes of operation:
    1. Upload the workingcopy version of an object to the repository, retrieve
       the html and return it (proxying the result).
    2. Give the workingcopy URL to the preview service (for those who can
       traverse it directly) and redirect to it as for the repository preview.
    """
    def __call__(self):
        url = self.get_preview_url_for(self.context)
        if self.should_upload(url):
            return self.proxied_preview()
        else:
            return self.redirect(self.workingcopy_url(url), trusted=True)
    def should_upload(self, url):
        # Preview services that cannot traverse the workingcopy directly
        # need the temporary-checkin/proxy mode.
        return 'friedbert' not in url  # XXX Really kludgy heuristics
    def proxied_preview(self):
        # Check the workingcopy version in under a temporary name, fetch the
        # rendered preview over HTTP, then delete the temporary object.
        preview_obj = self.temporary_checkin()
        url = self.get_preview_url_for(preview_obj)
        preview_request = urllib2.urlopen(url)
        del preview_obj.__parent__[preview_obj.__name__]
        return preview_request.read()
    def get_preview_url_for(self, preview_context):
        # Resolve the preview URL via the multi-adapter and forward the
        # original request's query string.
        url = zope.component.getMultiAdapter(
            (preview_context, self.preview_type),
            zeit.cms.browser.interfaces.IPreviewURL)
        querystring = self.request.environment['QUERY_STRING']
        if querystring:
            url = '%s?%s' % (url, querystring)
        return url
    def temporary_checkin(self):
        # Copy the workingcopy resource next to its repository original
        # under a per-user temporary id.
        content = zeit.cms.interfaces.ICMSContent(
            zeit.connector.interfaces.IResource(self.context))
        content.uniqueId = None
        target_folder = zeit.cms.interfaces.ICMSContent(
            self.context.uniqueId).__parent__
        temp_id = self.get_temp_id(self.context.__name__)
        target_folder[temp_id] = content
        return content
    def get_temp_id(self, name):
        # Temporary id is unique per principal and object name.
        return 'preview-%s-%s' % (
            self.request.principal.id, name)
    def workingcopy_url(self, url):
        # Rewrite the repository path inside ``url`` to the workingcopy path
        # understood by the preview service.
        # NOTE(review): relies on zope.app.appsetup and
        # zeit.cms.workingcopy.interfaces being importable although neither
        # module is imported at the top of this file -- verify at runtime.
        repository_path = urlparse.urlparse(self.context.uniqueId).path
        fullpath = self.url(self.context)
        workingcopy = self.url(zope.component.getUtility(
            zeit.cms.workingcopy.interfaces.IWorkingcopyLocation))
        workingcopy_path = fullpath.replace(workingcopy, '')
        config = zope.app.appsetup.product.getProductConfiguration('zeit.cms')
        workingcopy_path = config[
            'friebert-wc-preview-prefix'] + workingcopy_path
        url = url.replace(repository_path, workingcopy_path)
        return url
|
bsd-3-clause
| -3,901,331,997,260,017,700
| 35.351351
| 78
| 0.662825
| false
| 3.927007
| false
| false
| false
|
mluscon/ci-dnf-stack
|
dnf-docker-test/features/steps/repo_steps.py
|
1
|
5554
|
from __future__ import absolute_import
from __future__ import unicode_literals
import glob
import os
import tempfile
from behave import given
from behave import register_type
from behave import when
from behave.model import Table
import jinja2
import parse
from whichcraft import which
from command_steps import step_i_successfully_run_command
from file_steps import HEADINGS_INI
from file_steps import conf2table
from file_steps import step_a_file_filepath_with
from file_steps import step_an_ini_file_filepath_with
import file_utils
import table_utils
PKG_TMPL = """
Name: {{ name }}
Summary: {{ summary|default("Empty") }}
Version: {{ version|default("1") }}
Release: {{ release|default("1") }}%{?dist}
License: {{ license|default("Public Domain") }}
BuildArch: noarch
{%- if buildrequires is defined %}
{% for buildreq in buildrequires %}
BuildRequires: {{ buildreq }}
{%- endfor %}
{%- endif %}
{%- if requires is defined %}
{% for req in requires %}
Requires: {{ req }}
{%- endfor %}
{%- endif %}
{%- if obsoletes is defined %}
{% for obs in obsoletes %}
Obsoletes: {{ obs }}
{%- endfor %}
{%- endif %}
{%- if provides is defined %}
{% for prv in provides %}
Provides: {{ prv }}
{%- endfor %}
{%- endif %}
%description
%{summary}.
%files
"""
REPO_TMPL = "/etc/yum.repos.d/{!s}.repo"
HEADINGS_REPO = ["Package", "Tag", "Value"]
PKG_TAGS_REPEATING = ["BuildRequires", "Requires", "Obsoletes", "Provides"]
PKG_TAGS = ["Summary", "Version", "Release", "License"] + PKG_TAGS_REPEATING
JINJA_ENV = jinja2.Environment(undefined=jinja2.StrictUndefined)
@parse.with_pattern(r"enable|disable")
def parse_enable_disable(text):
    # Convert the matched literal to a boolean; the regex pattern guarantees
    # one of the two values, so the trailing assert is unreachable.
    if text == "enable":
        return True
    if text == "disable":
        return False
    assert False
# Make the {state:enable_disable} type usable in behave step patterns.
register_type(enable_disable=parse_enable_disable)
@when('I remove all repositories')
def step_i_remove_all_repositories(ctx):
    """
    Delete every ``*.repo`` file found in ``/etc/yum.repos.d/``.
    """
    repo_files = glob.glob("/etc/yum.repos.d/*.repo")
    for repo_file in repo_files:
        os.remove(repo_file)
@given('repository "{repository}" with packages')
def given_repository_with_packages(ctx, repository):
    """
    Builds dummy noarch packages, creates repo and *.repo* file.
    .. note::
        Requires *rpmbuild* and *createrepo_c*.
    Requires table with following headers:
    ========= ===== =======
     Package   Tag   Value
    ========= ===== =======
    *Tag* is tag in RPM. Supported ones are:
    ============= ===============
        Tag        Default value
    ============= ===============
    Summary       Empty
    Version       1
    Release       1
    License       Public Domain
    BuildRequires []
    Requires      []
    Obsoletes     []
    Provides      []
    ============= ===============
    All packages are built during step execution.
    .. note::
       *BuildRequires* are ignored for build-time (*rpmbuild* is executed
       with ``--nodeps`` option).
    Examples:
    .. code-block:: gherkin
        Feature: Working with repositories
          Background: Repository base with dummy package
            Given repository base with packages
               | Package | Tag | Value |
               | foo     |     |       |
          Scenario: Installing dummy package from background
            When I enable repository base
            Then I successfully run "dnf -y install foo"
    """
    packages = table_utils.parse_skv_table(ctx, HEADINGS_REPO,
                                           PKG_TAGS, PKG_TAGS_REPEATING)
    # Both build tools must be on PATH; fail the scenario early otherwise.
    rpmbuild = which("rpmbuild")
    ctx.assertion.assertIsNotNone(rpmbuild, "rpmbuild is required")
    createrepo = which("createrepo_c")
    ctx.assertion.assertIsNotNone(createrepo, "createrepo_c is required")
    tmpdir = tempfile.mkdtemp()
    template = JINJA_ENV.from_string(PKG_TMPL)
    for name, settings in packages.items():
        # Render one spec file per package and build it (deps ignored).
        settings = {k.lower(): v for k, v in settings.items()}
        ctx.text = template.render(name=name, **settings)
        fname = "{!s}/{!s}.spec".format(tmpdir, name)
        step_a_file_filepath_with(ctx, fname)
        cmd = "{!s} --define '_rpmdir {!s}' -bb {!s}".format(
            rpmbuild, tmpdir, fname)
        step_i_successfully_run_command(ctx, cmd)
    # Turn the build directory into a yum/dnf repository.
    cmd = "{!s} {!s}".format(createrepo, tmpdir)
    step_i_successfully_run_command(ctx, cmd)
    # Write the (disabled, gpgcheck-off) .repo file pointing at tmpdir.
    repofile = REPO_TMPL.format(repository)
    ctx.table = Table(HEADINGS_INI)
    ctx.table.add_row([repository, "name", repository])
    ctx.table.add_row(["", "enabled", "False"])
    ctx.table.add_row(["", "gpgcheck", "False"])
    ctx.table.add_row(["", "baseurl", "file://{!s}".format(tmpdir)])
    step_an_ini_file_filepath_with(ctx, repofile)
@given('empty repository "{repository}"')
def given_empty_repository(ctx, repository):
    """
    Create a repository exactly like
    :ref:`Given repository "{repository}" with packages`, only with an
    empty package table.
    """
    empty_packages = Table(HEADINGS_REPO)
    ctx.table = empty_packages
    given_repository_with_packages(ctx, repository)
@when('I {state:enable_disable} repository "{repository}"')
def i_enable_disable_repository(ctx, state, repository):
    """
    Flip the ``enabled`` flag of the named repository's .repo file.
    """
    path = REPO_TMPL.format(repository)
    parsed = file_utils.read_ini_file(path)
    # state is a bool parsed from the step text; str() gives "True"/"False".
    parsed.set(repository, "enabled", str(state))
    ctx.table = conf2table(parsed)
    step_an_ini_file_filepath_with(ctx, path)
|
gpl-3.0
| -8,378,289,381,050,230,000
| 28.700535
| 77
| 0.601728
| false
| 3.762873
| false
| false
| false
|
NSasquatch/vocoder
|
toolset.py
|
1
|
2495
|
# -*- coding: latin-1 -*-
"""
This module was created by Silas Gyger, silasgyger@gmail.com.
It stands under CC BY 4.0 License.
http://creativecommons.org/licenses/by/4.0/
"""
import thinkdsp
import numpy as np
from matplotlib import pyplot
class Wave(thinkdsp.Wave):
    # Thin extension of thinkdsp.Wave: index access to the sample array
    # plus helpers used by the vocoder.  Python 2 module.
    def __getitem__(self, key):
        # Direct read access to the underlying sample array.
        return self.ys[key]
    def __setitem__(self, key, value):
        self.ys[key] = value
        # NOTE: the return value is ignored by the ``w[i] = x`` statement
        # form; kept from the original for fluent call-style usage.
        return self
    @property
    def length(self):
        # Number of samples.
        return len(self.ys)
    def multiply_with(self, obj):
        """
        Multiply all y-values with the y-values of another wave, or with
        an array, element-wise (in place).
        :param obj: toolset.Wave, thinkdsp.Wave or np.array
        :return: self
        """
        assert isinstance(obj, (Wave, thinkdsp.Wave, np.ndarray)), "The object this Wave should be multiplied with must" \
                                " either be a %s, %s or a %s instance." % (Wave, thinkdsp.Wave, np.ndarray)
        if isinstance(obj, (Wave, thinkdsp.Wave)):
            self.ys *= obj.ys
        else:
            self.ys *= obj
        return self
    def make_average_amp_curve(self, buffer_size=800):
        """
        Creates an amp-curve using the "average" algorithm: each point is
        the mean absolute amplitude over a window of ``buffer_size``
        samples centred on it.
        :return: array containing amps
        """
        positive = lambda x: 0 if x < 0 else x
        amps = np.zeros(self.length)
        for i, y in enumerate(self.ys):
            # NOTE(review): relies on Python-2 integer division for
            # buffer_size/2 in the slice bounds -- confirm before porting.
            buffer_ys = self.ys[positive(i-buffer_size/2):i+buffer_size/2]
            amps[i] = abs(np.sum(np.abs(buffer_ys))/buffer_size)
        return amps
    def make_spectrum(self):
        """Computes the spectrum using FFT.
        returns: Spectrum
        """
        hs = np.fft.rfft(self.ys)
        return Spectrum(hs, self.framerate)
class Spectrum(thinkdsp.Spectrum):
    # thinkdsp.Spectrum extension whose inverse FFT yields a toolset.Wave.
    def band_pass(self, position, range, factor=0):
        """
        Attenuate all frequencies outside ``position +/- range``.
        position: centre frequency in Hz
        range: half-width of the pass band in Hz
        factor: what to multiply the attenuated magnitudes by
        """
        # NOTE: the ``range`` parameter shadows the builtin of that name.
        low_cutoff, high_cutoff = position-range, position+range
        self.high_pass(low_cutoff, factor)
        self.low_pass(high_cutoff, factor)
    def make_wave(self):
        """Transforms to the time domain.
        returns: Wave (the toolset subclass, not thinkdsp.Wave)
        """
        ys = np.fft.irfft(self.hs)
        return Wave(ys, self.framerate)
def to_wave(obj, framerate=None):
    """
    Convert a thinkdsp.Wave or a numpy array to a toolset.Wave.

    :param obj: the wave or array to convert
    :param framerate: framerate of the resulting wave when ``obj`` is a
        numpy array (ignored for wave inputs)
    :raises ValueError: if ``obj`` is a numpy array and no framerate given
    :return: a toolset.Wave, or None for unsupported input types
    """
    if isinstance(obj, thinkdsp.Wave):
        return Wave(obj.ys, obj.framerate)
    if isinstance(obj, np.ndarray):
        if framerate is None:
            # Call-style raise works on Python 2 and 3 alike; the original
            # "raise ValueError, ..." statement is Python-2-only syntax.
            # Also fixes the "covert" -> "convert" typo in the message.
            raise ValueError("Missing framerate to convert numpy-Array to wave.")
        return Wave(obj, framerate)
|
cc0-1.0
| 4,411,227,925,583,878,000
| 24.20202
| 116
| 0.694188
| false
| 2.874424
| false
| false
| false
|
nugget/python-insteonplm
|
insteonplm/devices/climateControl.py
|
1
|
3689
|
"""INSTEON Climate Control Device Class."""
import logging
from insteonplm.devices import Device
from insteonplm.constants import COMMAND_EXTENDED_GET_SET_0X2E_0X00
from insteonplm.messages.extendedSend import ExtendedSend
from insteonplm.messages.userdata import Userdata
from insteonplm.states.thermostat import (
Temperature,
Humidity,
SystemMode,
FanMode,
CoolSetPoint,
HeatSetPoint,
)
from insteonplm.states.statusReport import StatusReport
_LOGGER = logging.getLogger(__name__)
class ClimateControl_2441th(Device):
    """Thermostat model 2441TH."""
    def __init__(
        self, plm, address, cat, subcat, product_key=None, description=None, model=None
    ):
        """Init the ClimateControl_2441th class.

        Registers the cool/heat set points and the status report in the
        standard state list, and keeps system mode, fan mode, temperature
        and humidity as dedicated state objects.
        """
        Device.__init__(
            self, plm, address, cat, subcat, product_key, description, model
        )
        # States addressable through the generic state list (by group).
        self._stateList[0x01] = CoolSetPoint(
            self._address,
            "coolSetPoint",
            0x01,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        self._stateList[0x02] = HeatSetPoint(
            self._address,
            "heatSetPoint",
            0x02,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        self._stateList[0xEF] = StatusReport(
            self._address,
            "statusReport",
            0xEF,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        # Thermostat-specific states exposed via dedicated properties.
        self._system_mode = SystemMode(
            self._address,
            "systemMode",
            0x10,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        self._fan_mode = FanMode(
            self._address,
            "fanMode",
            0x11,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        self._temp = Temperature(
            self._address,
            "temperature",
            0x12,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
        self._humidity = Humidity(
            self._address,
            "humidity",
            0x13,
            self._send_msg,
            self._message_callbacks,
            0x00,
        )
    @property
    def cool_set_point(self):
        """Return the cool set point state."""
        return self._stateList[0x01]
    @property
    def heat_set_point(self):
        """Return the heat set point state."""
        return self._stateList[0x02]
    @property
    def system_mode(self):
        """Return the system mode state."""
        return self._system_mode
    @property
    def fan_mode(self):
        """Return the fan mode state."""
        return self._fan_mode
    @property
    def temperature(self):
        """Return the temperature state."""
        return self._temp
    @property
    def humidity(self):
        """Return the humidity state."""
        return self._humidity
    def async_refresh_state(self):
        """Request each state to provide status update."""
        _LOGGER.debug("Setting up extended status")
        # Extended GET (0x2E/0x00, cmd2=0x02) asks the thermostat for its
        # full status block.
        ext_status = ExtendedSend(
            address=self._address,
            commandtuple=COMMAND_EXTENDED_GET_SET_0X2E_0X00,
            cmd2=0x02,
            userdata=Userdata(),
        )
        ext_status.set_crc()
        _LOGGER.debug("Sending ext status: %s", ext_status)
        self._send_msg(ext_status)
        _LOGGER.debug("Sending temp status request")
        self.temperature.async_refresh_state()
    # pylint: disable=unused-argument
    def _mode_changed(self, addr, group, val):
        # Any mode change invalidates cached state; re-request everything.
        self.async_refresh_state()
|
mit
| -4,059,959,734,203,768,000
| 24.978873
| 87
| 0.548658
| false
| 4.018519
| false
| false
| false
|
johan92/yafpgatetris
|
string_stuff/big_string_to_mif.py
|
1
|
1180
|
#!/usr/bin/python
import sys
def print_usage( argv ):
print "Creates mif file for ROM for strings.\n Input: raw file with zeros and ones.\n Output: MIF file \nUsage: %s STRING_FILE_NAME MIF_FILE_NAME" % ( sys.argv[0] )
# CLI: read a raster of '0'/'1' characters, transpose it column-wise and
# emit a Quartus MIF (Memory Initialization File) for a string ROM.
if __name__ == "__main__":
  if len( sys.argv ) < 3:
    print_usage( sys.argv )
    exit( -1 )
  if sys.argv[1] == "--help" or sys.argv[1] == "-h":
    print_usage( sys.argv )
    exit( 0 )
  f1 = open( sys.argv[1], "r" )
  line_num = 0
  lines = []
  for line in f1:
    print line
    lines.append( line[:-1] ) # minus /n
  # Raster dimensions: x = characters per row, y = number of rows.
  orig_x = len( lines[0] )
  orig_y = len( lines )
  print "MSG_X = %d" % ( orig_x )
  print "MSG_Y = %d" % ( orig_y )
  # Transpose: one ROM word per column, rows stacked bottom-up.
  rev_lines = []
  for x in xrange( orig_x ):
    l = ""
    for y in xrange( orig_y ):
      l = lines[y][x] + l
    rev_lines.append( l )
  rom_width = orig_y
  rom_depth = orig_x
  f2 = open( sys.argv[2], "w" )
  f2.write("WIDTH=%d;\n" % rom_width )
  f2.write("DEPTH=%d;\n" % rom_depth )
  f2.write("ADDRESS_RADIX=HEX;\nDATA_RADIX=BIN;\nCONTENT BEGIN\n" )
  # One "addr : word;" line per column, addresses in 4-digit hex.
  for (i, l) in enumerate( rev_lines ):
    f2.write( "%s : %s;\n" % ( "{:04x}".format(i), l ) )
  f2.write("END;")
|
mit
| 5,125,996,493,227,969,000
| 20.454545
| 169
| 0.534746
| false
| 2.587719
| false
| false
| false
|
ajyoon/brown
|
brown/core/brace.py
|
1
|
3241
|
from brown.core.multi_staff_object import MultiStaffObject
from brown.core.music_font import MusicFontGlyphNotFoundError
from brown.core.music_text import MusicText
from brown.core.staff_object import StaffObject
from brown.utils.point import Point
from brown.utils.units import GraphicUnit
class Brace(MultiStaffObject, StaffObject, MusicText):
    """A brace spanning staves, recurring at line beginnings.
    The brace is drawn at the beginning of every line
    after its initial x position until the end of the staff.
    A brace will be drawn on the first line it appears on
    if and only if it is placed *exactly* at the line beginning.
    Consequently, `Brace(Mm(0), Mm(1000), some_staves)` will appear
    on the first line of the flowable, while
    `Brace(Mm(1), Mm(1000), some_staves)` will not begin drawing
    until the second line.
    """
    def __init__(self, pos_x, staves):
        """
        Args:
            pos_x (Unit): Where this brace goes into effect
            staves (set(Staff)): The staves this brace spans
        """
        MultiStaffObject.__init__(self, staves)
        StaffObject.__init__(self, self.highest_staff)
        # Calculate the height of the brace in highest_staff staff units
        scale = self.vertical_span / self.highest_staff.unit(4)
        # Pick the brace glyph variant sized for the spanned height
        # (larger spans use the larger optional glyph alternates).
        if self.vertical_span > self.highest_staff.unit(50):
            text = ('brace', 4)
        elif self.vertical_span > self.highest_staff.unit(30):
            text = ('brace', 3)
        elif self.vertical_span > self.highest_staff.unit(15):
            text = ('brace', 2)
        elif self.vertical_span > self.highest_staff.unit(4):
            text = 'brace'
        else:
            text = ('brace', 1)
        try:
            # Attempt to use size-specific optional glyph
            MusicText.__init__(self,
                               (pos_x, self.vertical_span),
                               text,
                               self.highest_staff,
                               scale_factor=scale)
        except MusicFontGlyphNotFoundError:
            # Default to non-optional glyph
            MusicText.__init__(self,
                               (pos_x, self.vertical_span),
                               'brace',
                               self.highest_staff,
                               scale_factor=scale)
    ######## PUBLIC PROPERTIES ########
    @property
    def length(self):
        """Unit: The breakable width of the object.
        This is used to determine how and where rendering cuts should be made.
        """
        return (self.staff.length
                - self.flowable.map_between_locally(self.staff, self).x)
    ######## PRIVATE METHODS ########
    def _render_before_break(self, local_start_x, start, stop, dist_to_line_start):
        # Draw on the first line only when placed exactly at its beginning.
        if start.x == GraphicUnit(0):
            self._render_complete(Point(start.x - self.bounding_rect.width, start.y))
    def _render_after_break(self, local_start_x, start, stop):
        # Redraw the brace at the start of each following line.
        self._render_complete(Point(start.x - self.bounding_rect.width, start.y))
    def _render_spanning_continuation(self, local_start_x, start, stop):
        # Lines fully spanned by the object get the same treatment.
        self._render_complete(Point(start.x - self.bounding_rect.width, start.y))
|
gpl-3.0
| 2,192,175,412,026,653,000
| 39.012346
| 85
| 0.59241
| false
| 4.031095
| false
| false
| false
|
yw374cornell/e-mission-server
|
emission/analysis/intake/cleaning/cleaning_methods/speed_outlier_detection.py
|
1
|
1286
|
# Techniques for outlier detection of speeds. Each of these returns a speed threshold that
# can be used with outlier detection techniques.
# Standard imports
import logging
class BoxplotOutlier(object):
    """Boxplot (IQR fence) speed threshold: Q3 + multiplier * IQR."""
    MINOR = 1.5
    MAJOR = 3
    def __init__(self, multiplier = MAJOR, ignore_zeros = False):
        self.multiplier = multiplier
        self.ignore_zeros = ignore_zeros
    def get_threshold(self, with_speeds_df):
        """Return the speed above which points are considered outliers."""
        candidates = (with_speeds_df[with_speeds_df.speed > 0]
                      if self.ignore_zeros else with_speeds_df)
        quartile_vals = candidates.quantile([0.25, 0.75]).speed
        logging.debug("quartile values are %s" % quartile_vals)
        spread = quartile_vals.iloc[1] - quartile_vals.iloc[0]
        logging.debug("iqr %s" % spread)
        return quartile_vals.iloc[1] + self.multiplier * spread
class SimpleQuartileOutlier(object):
    """Speed threshold at a fixed quantile of the observed speeds."""
    def __init__(self, quantile = 0.99, ignore_zeros = False):
        self.quantile = quantile
        self.ignore_zeros = ignore_zeros
    def get_threshold(self, with_speeds_df):
        """Return the configured quantile of the (optionally non-zero) speeds."""
        candidates = (with_speeds_df[with_speeds_df.speed > 0]
                      if self.ignore_zeros else with_speeds_df)
        return candidates.speed.quantile(self.quantile)
|
bsd-3-clause
| 4,354,731,408,748,637,700
| 34.722222
| 91
| 0.635303
| false
| 3.289003
| false
| false
| false
|
smurn/ifaddr
|
setup.py
|
1
|
1701
|
# Copyright (c) 2014 Stefan C. Mueller
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import os.path
from setuptools import setup, find_packages
# Use the README as the long description when building from a checkout
# that contains it; fall back to an empty string otherwise.
if os.path.exists('README.rst'):
    with open('README.rst') as f:
        long_description = f.read()
else:
    long_description = ""
setup(
    name = 'ifaddr',
    version = '0.1.6',
    description='Enumerates all IP addresses on all network adapters of the system.',
    long_description=long_description,
    author='Stefan C. Mueller',
    author_email='scm@smurn.org',
    url='https://github.com/pydron/ifaddr',
    packages = find_packages(),
    # The ipaddress backport is only needed on Python < 3.3.
    install_requires = ['ipaddress'] if sys.version_info[:2] < (3, 3) else [],
)
|
mit
| 8,916,418,490,596,393,000
| 40.512195
| 85
| 0.736626
| false
| 4.04038
| false
| false
| false
|
snower/forsun
|
forsun/servers/server.py
|
1
|
2811
|
# -*- coding: utf-8 -*-
# 15/6/10
# create by: snower
import logging
import threading
from tornado.ioloop import IOLoop, asyncio
from thrift.protocol.TBinaryProtocol import TBinaryProtocolAcceleratedFactory
from torthrift.transport import TIOStreamTransportFactory
from torthrift.server import TTornadoServer as BaseTTornadoServer
from .processor.Forsun import Processor
from .handler import Handler
from .http import HTTPServer, Application
from .. import timer
from ..status import forsun_status
from .. import config
class TTornadoServer(BaseTTornadoServer):
    # Thrift server that tracks connection statistics in forsun_status.
    def process(self, *args, **kwargs):
        # connected_count is cumulative; connecting_count tracks in-flight
        # connections and is decremented when processing finishes.
        try:
            forsun_status.connecting_count += 1
            forsun_status.connected_count += 1
            return super(TTornadoServer, self).process(*args, **kwargs)
        finally:
            forsun_status.connecting_count -= 1
class Server(object):
    """Runs the thrift RPC server and an optional HTTP server in a daemon thread."""

    def __init__(self, forsun):
        # forsun: application object handed to the RPC/HTTP handlers.
        self.forsun = forsun
        self.server = None       # thrift TTornadoServer
        self.http_server = None  # optional HTTP server
        self.thread = None       # thread running the IOLoop

    def serve_thrift(self):
        """Bind and start the thrift server on BIND_ADDRESS:PORT."""
        handler = Handler(self.forsun)
        processor = Processor(handler)
        tfactory = TIOStreamTransportFactory()
        protocol = TBinaryProtocolAcceleratedFactory()
        bind_address = config.get("BIND_ADDRESS", "127.0.0.1")
        port = config.get("PORT", 6458)
        self.server = TTornadoServer(processor, tfactory, protocol)
        self.server.bind(port, bind_address)
        self.server.start(1)
        logging.info("starting server by %s:%s", bind_address, port)

    def serve_http(self):
        """Bind and start the HTTP server if HTTP_BIND ("host:port") is configured."""
        http_bind = config.get("HTTP_BIND")
        if not http_bind:
            return
        (address, port) = http_bind.split(":")
        application = Application(self.forsun, debug=False, autoreload=False)
        self.http_server = HTTPServer(application, xheaders=True)
        self.http_server.bind(int(port), address)
        self.http_server.start(1)
        logging.info("starting http server by %s", http_bind)

    def start(self, init_callback):
        """Start both servers and the IOLoop in a daemon thread.

        init_callback is scheduled on the IOLoop once it is running.
        """
        def _():
            try:
                # On Python 3, tornado needs an asyncio event loop bound to
                # this (non-main) thread before the IOLoop can be used.
                if asyncio is not None:
                    loop = asyncio.new_event_loop()
                    asyncio.set_event_loop(loop)
                self.serve_thrift()
                self.serve_http()
                ioloop = IOLoop.instance()
                ioloop.add_callback(init_callback)
                ioloop.start()
            except Exception as e:
                # Wake the main thread and stop the timer so the process can
                # shut down instead of hanging on a dead server thread.
                logging.error("server error: %s", e)
                self.forsun.read_event.set()
                timer.stop()
        self.thread = threading.Thread(target=_)
        self.thread.setDaemon(True)
        self.thread.start()

    def stop(self):
        """Request the IOLoop (and therefore the server thread) to stop."""
        IOLoop.current().add_callback(lambda :IOLoop.current().stop())
        logging.info("server stoping")
|
mit
| 2,440,367,594,320,221,000
| 33.292683
| 77
| 0.622199
| false
| 4.085756
| false
| false
| false
|
kakapocoin/kakapocoin-old
|
contrib/pyminer/pyminer.py
|
1
|
6434
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after a failed/malformed RPC response before retrying.
ERR_SLEEP = 15
# Initial upper bound on nonces scanned per getwork; Miner.iterate rescales it
# from the measured hash rate (Python 2 long literal).
MAX_NONCE = 1000000L

settings = {}  # populated from the key=value config file in __main__
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind-compatible node (Python 2)."""

    OBJID = 1  # request-id counter; `self.OBJID += 1` rebinds it per instance

    def __init__(self, host, port, username, password):
        # Precompute the HTTP basic-auth header once.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call and return its 'result' field.

        Returns None (after printing a diagnostic) on transport/decode
        failure, and the server's error object when one is reported.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        # Current best-chain height.
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # With data=None fetches new work; with data submits a solution.
        return self.rpc('getwork', data)
def uint32(x):
    # Truncate to the low 32 bits (Python 2 long literal).
    return x & 0xffffffffL
def bytereverse(x):
    # Reverse the byte order of a 32-bit word (endianness swap).
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
        (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    # Byte-swap every aligned 32-bit word of the buffer.
    # Assumes len(in_buf) is a multiple of 4 (struct.unpack('@I') needs
    # exactly 4 bytes per chunk).
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Return *in_buf* with its 4-byte words in reversed order."""
    chunks = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return ''.join(reversed(chunks))
class Miner:
    """Scans getwork nonce ranges and submits any proof-of-work found (Python 2)."""

    def __init__(self, id):
        self.id = id                  # worker index, used in log output
        self.max_nonce = MAX_NONCE    # rescaled each round from the hash rate

    def work(self, datastr, targetstr):
        """Scan nonces over one getwork unit.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None when the range was exhausted.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        # NOTE(review): if max_nonce were ever 0 the loop body never runs and
        # `nonce` would be unbound here -- confirm max_nonce stays positive.
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and send it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Header bytes 76..80 live at hex-string offset 152..160.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One fetch/scan/submit cycle; retunes max_nonce toward 'scantime' secs."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                          work['target'])
        time_end = time.time()
        time_diff = time_end - time_start

        # Scale the next scan so it takes roughly settings['scantime'] seconds,
        # capped just below 2^32.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured RPC endpoint."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    # Process entry point: each worker owns its own RPC connection and miner.
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse the key=value config file ('#' starts a comment line).
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for anything the config file omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9131
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Normalize types (all config values were read as strings).
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # One OS process per mining "thread" (sidesteps the GIL).
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
mit
| 3,967,279,225,010,473,500
| 24.531746
| 84
| 0.648896
| false
| 2.83062
| false
| false
| false
|
yaybu/touchdown
|
touchdown/ssh/terminal.py
|
1
|
3244
|
# Copyright 2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import time
from touchdown.core import plan, serializers
from .connection import Connection
try:
from .agent import PosixAgentServer
except ImportError:
PosixAgentServer = None
class SshMixin(object):
    """Shared logic for building and exec'ing ssh/scp command lines.

    Subclasses provide get_command(); connection parameters come from the
    touchdown runner/serializer machinery.
    """

    def get_proxy_command(self):
        """Return ["-o", "ProxyCommand=..."] that tunnels through this host."""
        kwargs = serializers.Resource().render(self.runner, self.resource)
        cmd = [
            "/usr/bin/ssh",
            "-o",
            'User="{username}"'.format(**kwargs),
            "-o",
            'Port="{port}"'.format(**kwargs),
            "-W",
            "%h:%p",  # ssh -W: forward stdio to the eventual host:port
            kwargs["hostname"],
        ]
        return ["-o", "ProxyCommand={}".format(" ".join(cmd))]

    def get_command_and_args(self):
        """Build the base argv for this connection, including any proxy hop."""
        kwargs = serializers.Resource().render(self.runner, self.resource)
        cmd = [
            self.get_command(),
            "-o",
            'User="{username}"'.format(**kwargs),
            "-o",
            'Port="{port}"'.format(**kwargs),
            "-o",
            'HostName="{hostname}"'.format(**kwargs),
        ]
        if self.resource.proxy:
            proxy = self.runner.get_plan(self.resource.proxy)
            cmd.extend(proxy.get_proxy_command())
        return cmd

    def run(self, args):
        """Exec the command, optionally serving a throwaway ssh-agent.

        When a private key is configured (and the POSIX agent is importable),
        fork: the parent serves an agent socket holding the key until the
        child exits, then removes the socket dir; the child waits for the
        socket to appear and execs ssh with SSH_AUTH_SOCK pointing at it.
        """
        cmd = self.get_command_and_args()
        cmd.extend(args)
        environ = os.environ.copy()
        if self.resource.private_key and PosixAgentServer:
            socket_dir = tempfile.mkdtemp(prefix="ssh-")
            socket_file = os.path.join(socket_dir, "agent.{}".format(os.getpid()))
            environ["SSH_AUTH_SOCK"] = socket_file
            # NOTE(review): raises KeyError when SHELL is unset -- confirm
            # this path only runs from interactive environments.
            del environ["SHELL"]
            child_pid = os.fork()
            if child_pid:
                # Parent process: serve the agent until the child terminates.
                a = PosixAgentServer(socket_file)
                a.add(self.resource.private_key, "touchdown.pem")
                try:
                    a.serve_while_pid(child_pid)
                finally:
                    shutil.rmtree(socket_dir)
                return
            # Child process: wait for the agent socket before exec'ing.
            while not os.path.exists(socket_file):
                time.sleep(0.5)
        os.execvpe(cmd[0], cmd, environ)
class SshPlan(plan.Plan, SshMixin):
    """Plan that opens an interactive ssh session to a Connection resource."""

    name = "ssh"
    resource = Connection

    def get_command(self):
        """Binary this plan execs."""
        return "/usr/bin/ssh"

    def get_command_and_args(self):
        """Extend the mixin's argv with the trailing "remote" argument."""
        return super(SshPlan, self).get_command_and_args() + ["remote"]

    def execute(self, args):
        self.run(args)
class ScpPlan(plan.Plan, SshMixin):
    """Plan that copies a file over scp between two endpoints."""

    name = "scp"
    resource = Connection

    def get_command(self):
        """Binary this plan execs."""
        return "/usr/bin/scp"

    def execute(self, source, destination):
        endpoints = [source, destination]
        self.run(endpoints)
|
apache-2.0
| -4,779,550,030,188,554,000
| 26.965517
| 82
| 0.579531
| false
| 3.970624
| false
| false
| false
|
HumanDynamics/openbadge-analysis
|
openbadge_analysis/visualization/contribution.py
|
1
|
4081
|
import datetime
from bokeh.charts import Area, output_file, show
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.models.widgets import Panel, Tabs
def unix_time_ms(dt):
    """Convert a naive UTC datetime to a Unix timestamp in milliseconds.

    :param dt: naive datetime interpreted as UTC
    :return: timestamp as a float number of milliseconds
    """
    unix_epoch = datetime.datetime(1970, 1, 1)
    delta = dt - unix_epoch
    return delta.total_seconds() * 1000
def contribution_plot(df_stitched, meeting_name, rolling=True, member_names=None):
    """
    Creates a collection of 4 stacked area graphs that show seconds of contribution per member per minute for a meeting.
    The four graphs are: 1 min, 30 sec, 10 sec, 5 sec resampling frequencies.
    :param df_stitched: DataFrame whose values are boolean and indicate whether a badge wearer (column name) was
        speaking at a particular timestamp, has columns: datetime, member1, member2, etc.
    :param meeting_name: Name of meeting (usually uuid), i.e. the part of the log file before '.txt'
    :param rolling: True or False. Whether or not to generate the graph with a rolling mean (which makes the graph
        smoother but might not most accurately represent the data). True by default
    :param member_names: A dictionary mapping member keys to member names (First Last format)
    :return: bokeh Tabs holding 4 stacked area graphs
    """
    def area_chart(df, interval, rolling):
        """Build one stacked Area chart at the given re-sampling interval (seconds)."""
        # Fix: operate on the `df` argument (the original closed over
        # df_stitched and silently ignored the parameter).
        # Each raw sample spans 50ms, so /(1000/50) converts sample counts to
        # seconds; *(60/interval) scales each bucket to seconds per minute.
        df = df.resample(str(interval)+'S').sum().fillna(0)/(1000/50)*(60/interval)
        # Rename columns if display names were given; one rename call applies
        # the whole mapping (the old per-key loop re-applied it redundantly).
        if member_names:
            df.rename(columns=member_names, inplace=True)
        if rolling:
            df = df.rolling(min_periods=1, window=5, center=True).mean()  # To smooth graph
        start = unix_time_ms(df.index[0])
        start_datetime = datetime.datetime.utcfromtimestamp(start/1000)
        end = unix_time_ms(df.index[len(df.index)-1])
        end_datetime = datetime.datetime.utcfromtimestamp(end/1000)
        df.reset_index(level='datetime', inplace=True)  # To input x values into area chart
        # Build the title without backslash line-continuations (the old form
        # embedded the continuation line's leading whitespace in the string).
        mean_note = 'with rolling mean' if rolling else 'without rolling mean'
        graph_title = ('Contribution per Minute per Member for Meeting ' + meeting_name +
                       ' (' + mean_note + ') from ' + start_datetime.strftime('%I:%M %p') +
                       ' to ' + end_datetime.strftime('%I:%M %p'))
        area = Area(
            df,
            x='datetime',  # Column name
            title=graph_title, legend='top_left',
            stack=True, xlabel='Time of Day', ylabel='Number of Seconds',
            xscale='datetime',
            width=1700, height=400,
            tools='xpan, xwheel_zoom, box_zoom, reset, resize',
        )
        # Format tick labels on x-axis
        area.below[0].formatter = DatetimeTickFormatter()
        area.below[0].formatter.formats = dict(years=['%Y'], months=['%b %Y'], days=['%d %b %Y'],
                                               hours=['%I:%M %P'], hourmin=['%I:%M %P'],
                                               minutes=['%I:%M %P'], minsec=['%I:%M:%S %P'],
                                               seconds=['%I:%M:%S %P'])
        return area

    # One tab per re-sampling resolution, coarsest first (same order/titles
    # as before: 60, 30, 10, 5 seconds).
    tabs = []
    for interval in (60, 30, 10, 5):
        chart = area_chart(df_stitched, interval, rolling)
        tabs.append(Panel(child=chart, title='{} Second Intervals'.format(interval)))
    return Tabs(tabs=tabs)
|
mit
| 410,644,883,875,665,500
| 44.865169
| 120
| 0.616025
| false
| 3.817587
| false
| false
| false
|
ludobox/ludobox-ui
|
server/ludobox/history.py
|
2
|
5286
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Record and manage file changes and keep track of history.
Key concepts are :
- events : everytime somethin is changed, we use this event
- history : the whole thread of events that applies to a page
For each event, a unique SHA id is created (like git https://stackoverflow.com/questions/29106996/git-what-is-a-git-commit-id )
"""
import hashlib
import time
import json
from flask import current_app
from jsonpatch import make_patch, JsonPatch
# TODO : implement state changes (draft -> reviewed, etc.)
# Closed set of event kinds accepted by new_event().
event_types = ["create", "update", "delete", "change_state"]
# hashing changes to create an id
# NOTE(review): this hasher is module-global, so each generated id also
# depends on everything hashed before it -- ids stay unique but are not
# reproducible, and the shared object is not thread-safe; confirm intended.
sha_1 = hashlib.sha1()
def new_event(event_type, content, user=None):
    """Create a new event dict of the given type.

    :param event_type: one of the module-level event_types
    :param content: JSON-compatible dict payload
    :param user: optional user identifier
    :return: dict with keys type, content, user, id (40-char SHA-1), ts
    :raises ValueError: on an unknown type or a non-dict content
    """
    if event_type not in event_types:
        raise ValueError(
            "Event type should be one of the following %s"%", ".join(event_types))

    if type(content) is not dict:
        raise ValueError(
            "Event content should be a JSON-compatible object.")

    # timestamp (whole seconds since the epoch)
    ts = int(time.time())

    # Generate a unique ID from the whole content.
    # Fix: use a fresh hasher per event instead of the shared module-level
    # sha_1, whose accumulated state made ids depend on call history and
    # was not thread-safe. Encoding keeps this working on Python 3 as well.
    hasher = hashlib.sha1()
    hasher.update(("%s - %s - %s - %s"%(event_type, content, user, ts)).encode("utf-8"))
    sha_id = hasher.hexdigest()

    return {
        "type" : event_type,
        "content" : content,
        "user" : user,
        "id" : sha_id,
        "ts" : ts
    }
def is_valid_event(event):
    """Check structural validity of an event dict.

    Returns True when valid; raises AssertionError otherwise.
    """
    assert type(event) is dict
    # Fix: the original `type(event["id"]) is str or unicode` parsed as
    # `(type(...) is str) or unicode`, which is always truthy, so the id was
    # never actually validated. `type(u"")` is unicode on py2, str on py3.
    assert isinstance(event["id"], (str, type(u"")))
    # SHA-1 hexdigests are exactly 40 chars; the old `is 40` relied on
    # CPython small-int interning -- use == for the value comparison.
    assert len(event["id"]) == 40
    assert type(event["content"]) is dict
    assert type(event["ts"]) is int
    assert event["type"] in event_types
    return True
def add_event_to_history(content_previous_version, event):
    """
    Does 3 things :
    - create threaded history of events if empty
    - add current event to history
    - replace old content by the new
    """
    assert is_valid_event(event)

    # immutable: clone original reference (shallow copy -- nested values are
    # still shared with the original)
    content_with_updated_history = content_previous_version.copy()

    # init history if empty
    if "history" not in content_with_updated_history.keys():
        content_with_updated_history["history"] = []

    # re-apply changes and store last version
    if event["type"] == "update":
        content_with_updated_history = apply_update_patch(content_with_updated_history, event)
    elif event["type"] == "change_state":
        new_state = event["content"]["to"]
        content_with_updated_history["state"] = new_state

    # add event to history
    content_with_updated_history["history"].append(event)

    current_app.logger.debug("Event : %s - %s"%(event["type"], content_with_updated_history))
    return content_with_updated_history
def make_create_event(content, user=None):
    """Build a "create" event for *content*.

    Returns None for empty content; raises ValueError when the content
    already carries a non-empty history (CREATE must be the first event).
    """
    has_prior_history = "history" in content.keys() and len(content["history"]) != 0
    if has_prior_history:
        raise ValueError("You are trying to use the CREATE action on a game that already has an history.")
    # Nothing to record for missing/empty content.
    if content is None or not content.keys():
        return None
    # Snapshot the content into a fresh "create" event.
    return new_event("create", content.copy(), user)
def make_update_event(old_content, new_content, user=None):
    """Build an "update" event carrying the JSON diff between two contents.

    Returns None when there is no effective change.
    """
    # make things immutable
    new = new_content.copy()
    old = old_content.copy()

    # ignore keys we don't want to track in the history events
    ignored_keys = ["history", "files", "errors", "has_errors"]
    for k in ignored_keys:
        new.pop(k, None)
        old.pop(k, None)

    # create json diff
    # NOTE(review): make_patch(new, old) yields the patch turning *new* into
    # *old*; apply_history relies on this orientation when replaying --
    # confirm against the jsonpatch docs before changing the argument order.
    patch = make_patch(new, old)

    # check if there is actual changes
    if not len(list(patch)) :
        return None

    # create a new event and add it to history
    event = new_event("update", { "changes" : list(patch) }, user)
    return event
def make_update_state_event(old_content, updated_content_state, user=None):
    """Build a "change_state" event recording a from/to state transition."""
    transition = {
        "from": old_content["state"],
        "to": updated_content_state,
    }
    return new_event("change_state", transition, user)
def apply_update_patch(content, event):
    """Apply the JSON-diff changes carried by an "update" event to *content*."""
    changes = event["content"]["changes"]
    return JsonPatch(changes).apply(content)
def apply_history(history, selected_id):
    """
    Re-apply the chain of events from the history until selected id.
    Returns the content *without* the history (implicitly None when
    selected_id is not found -- kept for backward compatibility).
    """
    # check the hash format
    assert type(selected_id) is str
    # SHA-1 hexdigest length; the old `is 40` relied on small-int interning.
    assert len(selected_id) == 40

    final_content = {}

    # run again the course of events
    for event in history:
        if not is_valid_event(event) :
            raise ValueError("Event does not follow a proper format.")

        # check event type
        if event["type"] == "create": # init with full content
            final_content = event["content"]
        elif event["type"] == "update":
            final_content = apply_update_patch(final_content, event)
        elif event["type"] == "change_state":
            # Fix: the original computed the new state but never stored it,
            # so replayed content silently lost every state transition
            # (add_event_to_history applies it; replay must mirror that).
            final_content["state"] = event["content"]["to"]

        # run until the selected event has been applied
        if event["id"] == selected_id :
            return final_content
|
agpl-3.0
| -5,003,481,011,867,409,000
| 29.37931
| 127
| 0.644344
| false
| 3.835994
| false
| false
| false
|
cineuse/CNCGToolKit
|
cgtkLibs/cgtk_os/TemporaryDirectory.py
|
1
|
2998
|
# coding=utf8
# Copyright (c) 2016 Strack
import os
import warnings
from tempfile import mkdtemp
import logging
import cgtk_log
log = cgtk_log.cgtk_log(level=logging.INFO)
class TemporaryDirectory(object):
    """
    Create and return a temporary directory. This has the same
    behavior as mkdtemp but can be used as a context manager. For
    Examples:
        >>> with TemporaryDirectory() as tmp_dir:
        >>>     ...
    Upon exiting the context, the directory and everything contained
    in it are removed.
    """

    def __init__(self, suffix="", prefix="tmp", dir_=None):
        self._closed = False
        self.name = None  # Handle mkdtemp raising an exception
        self.name = mkdtemp(suffix, prefix, dir_)

    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)

    def __enter__(self):
        # The context manager yields the directory *path*, not the object.
        return self.name

    def cleanup(self, _warn=False):
        """Remove the directory tree; safe to call more than once."""
        if self.name and not self._closed:
            try:
                self._rmtree(self.name)
            except (TypeError, AttributeError) as ex:
                # Issue #10188: Emit a warning on
                # if the directory could not be cleaned
                # up due to missing globals
                if "None" not in str(ex):
                    log.info("ERROR: {!r} while cleaning up {!r}".format(ex, self, ))
                    raise
                return
            self._closed = True
            if _warn:
                log.warning("Implicitly cleaning up {!r}".format(self))

    def __exit__(self, exc, value, tb):
        self.cleanup()

    def __del__(self):
        # Issue a ResourceWarning if implicit cleanup needed
        self.cleanup(_warn=True)

    # XXX (ncoghlan): The following code attempts to make
    # this class tolerant of the module nulling out process
    # that happens during CPython interpreter shutdown
    # Alas, it doesn't actually manage it. See issue #10188
    _listdir = staticmethod(os.listdir)
    _path_join = staticmethod(os.path.join)
    _isdir = staticmethod(os.path.isdir)
    _islink = staticmethod(os.path.islink)
    _remove = staticmethod(os.remove)
    _rmdir = staticmethod(os.rmdir)
    _warn = warnings.warn

    def _rmtree(self, path):
        # Essentially a stripped down version of shutil.rmtree. We can't
        # use globals because they may be None'ed out at shutdown.
        for name in self._listdir(path):
            fullname = self._path_join(path, name)
            try:
                isdir = self._isdir(fullname) and not self._islink(fullname)
            except OSError:
                isdir = False
            if isdir:
                self._rmtree(fullname)
            else:
                try:
                    self._remove(fullname)
                except OSError:
                    pass
        try:
            self._rmdir(path)
        except OSError:
            pass
if __name__ == "__main__":
    # Smoke test: create a temporary directory and show its path.
    with TemporaryDirectory() as tmp_dir:
        # Fix: use the function form of print so this demo runs on both
        # Python 2 and Python 3 (the statement form is a SyntaxError on 3).
        print(tmp_dir)
|
mit
| -9,125,791,930,769,588,000
| 29.907216
| 85
| 0.571714
| false
| 4.307471
| false
| false
| false
|
modcloth/tory-client
|
tory_client/unregister.py
|
1
|
2146
|
# vim:fileencoding=utf-8
import argparse
import logging
import os
import sys
from . import __version__
from .client import delete_host
from .junkdrawer import HelpFormatter
# argparse usage banner (%(prog)s is substituted by argparse).
USAGE = """%(prog)s [options]
Unregister host(s) in tory.
"""

# Shown after the option list (--help); worked shell examples.
EPILOGUE = """\
Examples:
# Unregister a machine by name
%(prog)s --name foo-bar.example.com
# Unregister a machine by ipv4
%(prog)s --name 192.168.113.29
# Unregister a whole bunch of machines with hostnames that
# start with "generic-"
tory-inventory | \\
jq -r '._meta | .hostvars | .[] |
select(.hostname | startswith("generic-")) |
.hostname' | \\
xargs %(prog)s -n
"""

# Fallback when neither --tory-server nor $TORY_SERVER is provided.
DEFAULT_TORY_SERVER = 'http://localhost:9462/ansible/hosts'
def main(sysargs=sys.argv[:]):
    """CLI entry point: unregister each named host from the tory server.

    Returns the number of failures (usable directly as a process exit
    code, where 0 means every host was removed).
    """
    parser = argparse.ArgumentParser(
        usage=USAGE,
        formatter_class=HelpFormatter,
        epilog=EPILOGUE,
    )
    parser.add_argument('--version', action='version', version=__version__)
    parser.add_argument(
        '-n', '--name',
        nargs='+',
        metavar='TORY_HOSTNAME',
        # Default comes from the whitespace-separated $TORY_HOSTNAME,
        # with empty fragments filtered out.
        default=list(filter(lambda s: s != '', [
            _s.strip() for _s in os.environ.get('TORY_HOSTNAME', '').split()
        ])),
        help='host name(s) or ip(s) to uregister',
    )
    parser.add_argument(
        '-s', '--tory-server',
        default=os.environ.get('TORY_SERVER', DEFAULT_TORY_SERVER),
        help='tory inventory server (including path)'
    )
    parser.add_argument(
        '-A', '--auth-token',
        default=os.environ.get('TORY_AUTH_TOKEN', 'swordfish'),
        metavar='TORY_AUTH_TOKEN',
        help='tory server auth token'
    )

    args = parser.parse_args(sysargs[1:])

    logging.basicConfig(level=logging.INFO, stream=sys.stdout)
    log = logging.getLogger('tory-unregister')

    n_failures = 0

    for identifier in args.name:
        status = delete_host(args.tory_server, args.auth_token, identifier)
        # The server answers 204 No Content on successful deletion.
        if status == 204:
            log.info('Removed host %s', identifier)
        else:
            log.warn('Failed to remove host %s: %s',
                     identifier, status)
            n_failures += 1

    return n_failures
|
mit
| -5,951,089,021,157,053,000
| 25.493827
| 76
| 0.602982
| false
| 3.52381
| false
| false
| false
|
bitcoinfees/bitcoin-feemodel
|
feemodel/txmempool.py
|
1
|
32024
|
from __future__ import division
import os
import threading
import sqlite3
import decimal
import logging
from time import time
from copy import copy
from itertools import groupby
from operator import attrgetter, itemgetter
from bitcoin.core import b2lx
from feemodel.config import config, datadir, MINRELAYTXFEE, PRIORITYTHRESH
from feemodel.util import (proxy, StoppableThread, get_feerate, WorkerThread,
cumsum_gen, BlockMetadata, StepFunction)
from feemodel.stranding import tx_preprocess, calc_stranding_feerate
from feemodel.simul.simul import SimEntry
logger = logging.getLogger(__name__)

# NOTE(review): presumably serializes access to the memblock sqlite DB;
# its use sites are outside this chunk -- confirm before relying on it.
db_lock = threading.Lock()

# sqlite DDL (column definitions) for the current memblock database:
# one row per recorded block, txs deduplicated across blocks, and a
# junction table linking txs to the blocks they were observed at.
MEMBLOCK_SCHEMA = {
    "blocks": [
        'height INTEGER PRIMARY KEY',
        'size INTEGER',
        'time INTEGER'
    ],
    "txs": [
        "id INTEGER PRIMARY KEY",
        "txid TEXT",
        "size INTEGER",
        "fee TEXT",
        "startingpriority TEXT",
        "time INTEGER",
        "height INTEGER",
        "depends TEXT",
        "feerate INTEGER",
        "heightremoved INTEGER"
    ],
    "blocktxs": [
        "blockheight INTEGER",
        "txrowid INTEGER",
        "currentpriority TEXT",
        "isconflict INTEGER",
        "inblock INTEGER"
    ]
}

# TODO: remove this when transition to new DB is complete
# Legacy single-table layout, kept only for reading old databases.
OLD_MEMBLOCK_TABLE_SCHEMA = {
    'blocks': [
        'height INTEGER UNIQUE',
        'size INTEGER',
        'time INTEGER'
    ],
    'txs': [
        'blockheight INTEGER',
        'txid TEXT',
        'size INTEGER',
        'fee TEXT',
        'startingpriority TEXT',
        'currentpriority TEXT',
        'time INTEGER',
        'height INTEGER',
        'depends TEXT',
        'feerate INTEGER',
        'leadtime INTEGER',
        'isconflict INTEGER',
        'inblock INTEGER'
    ]
}

# Default on-disk location of the memblock database.
MEMBLOCK_DBFILE = os.path.join(datadir, 'memblock.db')
class TxMempool(StoppableThread):
    '''Thread that tracks the mempool state at points of block discovery.

    When the thread is running, Bitcoin Core is polled every poll_period
    seconds over JSON-RPC for:
        1. The current block count, via getblockcount().
        2. The transactions in the mempool, via getrawmempool(verbose=True)

    If the block count has increased in between polls, we record:
        1. Transactions in the mempool just prior to block discovery
        2. For each transaction, whether or not it was included in the block.

    The goal is to make inferences about the transaction selection policies
    of miners.

    The polling is done via batch call; however, they are not processed
    atomically by Bitcoin Core - there is the probability of a race condition
    in which the block count increases in between processing the two requests.
    In this case the statistics for that block will be somewhat degraded.

    In addition, chain re-orgs are not handled. If a re-org happens, the
    transactions that we record are not necessarily representative of the
    pool of valid transactions seen by the miner. Any inference algorithm
    must be tolerant of such errors, in addition to any other kinds of network
    errors.
    '''

    def __init__(self, dbfile=MEMBLOCK_DBFILE,
                 blocks_to_keep=config.getint("txmempool", "blocks_to_keep"),
                 poll_period=config.getfloat("txmempool", "poll_period")):
        # dbfile: sqlite file MemBlocks are written to (falsy disables writes).
        self.state = None        # latest MempoolState snapshot (None when stopped)
        self.blockworker = None  # WorkerThread that runs process_blocks
        self.dbfile = dbfile
        self.blocks_to_keep = blocks_to_keep
        self.poll_period = poll_period
        super(TxMempool, self).__init__()

    @StoppableThread.auto_restart(60)
    def run(self):
        """Target function of the thread.
        Updates mempool every poll_period seconds.
        """
        logger.info("Starting TxMempool with {} blocks_to_keep.".
                    format(self.blocks_to_keep))
        logger.info("memblock dbfile is at {}".format(self.dbfile))
        # Block processing happens on a worker thread so a slow DB write
        # cannot delay the next mempool poll.
        self.blockworker = WorkerThread(self.process_blocks)
        self.blockworker.start()
        try:
            self.state = get_mempool_state()
            while not self.is_stopped():
                self.update()
                self.sleep(self.poll_period)
        finally:
            self.blockworker.stop()
            self.state = None
            logger.info("TxMempool stopped.")

    def update(self):
        """Update the mempool state.
        If block height has increased, call self.process_blocks through
        blockworker thread.
        """
        newstate = get_mempool_state()
        if newstate.height > self.state.height:
            self.blockworker.put(self.state, newstate)
        self.state = newstate
        logger.debug(repr(newstate))
        return newstate

    def process_blocks(self, prevstate, newstate):
        """Record the mempool state in a MemBlock.
        This is called in self.blockworker.run.
        """
        # Make a copy because we are going to mutate it
        prevstate = copy(prevstate)
        memblocks = []
        # One MemBlock per height gained since the previous poll.
        while prevstate.height < newstate.height:
            memblock = MemBlock()
            memblock.record_block(prevstate)
            memblocks.append(memblock)

        # The set of transactions that were removed from the mempool, yet
        # were not included in a block.
        conflicts = (prevstate - newstate).entries
        conflicts_size = sum([entry.size for entry in conflicts.values()])
        for txid in conflicts:
            # For the first block, label the MemBlock entries that are
            # conflicts. Assume the conflict was removed after the first
            # block, so remove them from the remaining blocks.
            memblocks[0].entries[txid].isconflict = True
            for memblock in memblocks[1:]:
                del memblock.entries[txid]
        if len(conflicts):
            logger.info("process_blocks: {} conflicts ({} bytes) removed.".
                        format(len(conflicts), conflicts_size))
        if conflicts_size > 10000:
            # If many conflicts are removed, it can screw up the txsource
            # estimation; so log a warning.
            logger.warning("process_blocks: {} bytes of conflicts removed.".
                           format(conflicts_size))

        if self.dbfile and self.is_alive():
            for memblock in memblocks:
                try:
                    memblock.write(self.dbfile, self.blocks_to_keep)
                except Exception:
                    logger.exception("MemBlock write/del exception.")

        return memblocks

    def get_stats(self):
        """Return a JSON-compatible dict of parameters and current state stats."""
        stats = {
            "params": {
                "poll_period": self.poll_period,
                "blocks_to_keep": self.blocks_to_keep
            },
            "num_memblocks": len(MemBlock.get_heights())
        }
        state = self.state
        if state is not None:
            stats.update(state.get_stats())
        return stats

    def __nonzero__(self):
        # Truthy while the thread is running (state is populated).
        return self.state is not None
class MempoolState(object):
    """Mempool state.

    Comprised of:
        height - the block height
        entries - dictionary of mempool entries
        time - time in seconds
    """

    def __init__(self, height, rawmempool):
        self.height = height
        # Convert raw getrawmempool entries into MemEntry objects keyed by txid.
        self.entries = {txid: MemEntry.from_rawentry(rawentry)
                        for txid, rawentry in rawmempool.iteritems()}
        self.time = int(time())

    def get_sizefn(self):
        """Return a StepFunction mapping feerate -> total size at/above that feerate."""
        # Group entry sizes by feerate, highest feerate first.
        entries = sorted(self.entries.values(), key=attrgetter("feerate"),
                         reverse=True)
        sizebyfee = [
            (feerate, sum([entry.size for entry in feegroup]))
            for feerate, feegroup in groupby(entries, attrgetter("feerate"))]
        if not sizebyfee:
            # Empty mempool: a flat zero function.
            return StepFunction([0, 1], [0, 0])
        feerates_rev, sizes = zip(*sizebyfee)
        # Cumulative size from the highest feerate downward, then re-ordered
        # so the step function is indexed by ascending feerate.
        cumsize_rev = list(cumsum_gen(sizes))
        feerates = list(reversed(feerates_rev))
        cumsize = list(reversed(cumsize_rev))
        sizefn = StepFunction(feerates, cumsize)
        sizefn.addpoint(feerates[-1]+1, 0)
        return sizefn

    def get_stats(self):
        """Return a JSON-compatible summary (approximated cumulative size curve)."""
        sizefn = self.get_sizefn()
        approxfn = sizefn.approx()
        feerates_approx, cumsize_approx = zip(*approxfn)
        size_with_fee = sizefn(MINRELAYTXFEE)

        stats = {
            "cumsize": {
                "feerates": feerates_approx,
                "size": cumsize_approx,
            },
            "currheight": self.height,
            "numtxs": len(self.entries),
            "sizewithfee": size_with_fee
        }
        return stats

    def __copy__(self):
        # Deep-ish copy: entries dict and each entry are copied.
        cpy = MempoolState(self.height, {})
        cpy.entries = {txid: copy(entry)
                       for txid, entry in self.entries.iteritems()}
        cpy.time = self.time
        return cpy

    def __sub__(self, other):
        """Difference state: height/time deltas plus entries only present in self."""
        if not isinstance(other, MempoolState):
            raise TypeError("Operands must both be MempoolState instances.")
        result = MempoolState(self.height - other.height, {})
        result.time = self.time - other.time
        result.entries = {
            txid: self.entries[txid]
            for txid in set(self.entries) - set(other.entries)
        }
        return result

    def __repr__(self):
        return "MempoolState(height: {}, entries: {}, time: {})".format(
            self.height, len(self.entries), self.time)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return self.__dict__ != other.__dict__
class BaseMemBlock(MempoolState):
    """Mempool state snapshot paired with the block that was mined from it.

    Holds everything shared by the old and new on-disk formats; the
    DB-format-specific read/write logic lives in the subclasses.
    """
    def __init__(self):
        # The attributes inherited from MempoolState
        self.height = None
        self.entries = None
        self.time = None
        # MemBlock specific attributes
        self.blockheight = None
        self.blocksize = None
    def record_block(self, state):
        """Populate this MemBlock from *state* for the newly found block.

        WARNING: mutates *state* in place — entries included in the block
        are deleted from state.entries, broken deps are pruned, and
        state.height is incremented, so that the same state can be reused
        for the next block of a multi-block update cycle.
        """
        self.height = state.height
        self.entries = {txid: copy(entry)
                        for txid, entry in state.entries.iteritems()}
        self.time = state.time
        # Initialize the MemBlock-only per-entry attributes.
        for entry in self.entries.values():
            entry.inblock = False
            entry.isconflict = False
            entry.leadtime = self.time - entry.time
        self.blockheight = state.height + 1
        block = proxy.getblock(proxy.getblockhash(self.blockheight))
        self.blocksize = len(block.serialize())
        blockname = BlockMetadata(self.blockheight).get_poolname()
        blocktxids = [b2lx(tx.GetHash()) for tx in block.vtx]
        entries_inblock = set(self.entries) & set(blocktxids)
        for txid in entries_inblock:
            self.entries[txid].inblock = True
            # Delete it, because state.entries will be used for the next block
            # if there are > 1 blocks in this update cycle.
            del state.entries[txid]
        # Get rid of broken deps, for multiple blocks
        for entry in state.entries.values():
            entry.depends = filter(lambda dep: dep in state.entries,
                                   entry.depends)
        stats = self.calc_stranding_feerate(bootstrap=False)
        if stats:
            stranding_feerate = stats['sfr']
            abovekn = stats['abovekn']
            belowkn = stats['belowkn']
        else:
            stranding_feerate = None
            abovekn = None
            belowkn = None
        # len(blocktxids)-1 excludes the coinbase tx from the count.
        blocktext = (
            'Block {} ({} bytes) by {}: {}/{} in mempool, '
            'SFR/akn/bkn: {}/{}/{}'.format(
                self.blockheight, self.blocksize, blockname,
                len(entries_inblock), len(blocktxids)-1,
                stranding_feerate, abovekn, belowkn))
        logger.info(blocktext)
        # As a measure of our node's connectivity, we want to note the
        # ratio below. If it is low, it means that our node is not being
        # informed of many transactions.
        # NOTE(review): under Python 2 this is integer division unless the
        # module does "from __future__ import division" — verify, since an
        # integer ratio would make the < 0.9 test fire whenever any tx is
        # missing.
        if len(blocktxids) > 1:
            incl_ratio = len(entries_inblock) / (len(blocktxids)-1)
            if incl_ratio < 0.9:
                logger.warning("Only {}/{} in block {}.".format(
                    len(entries_inblock), len(blocktxids)-1,
                    self.blockheight))
        state.height += 1
    def calc_stranding_feerate(self, bootstrap=False):
        """Compute stranding-feerate stats for this memblock, or None if
        there are no usable txs after preprocessing.

        Raises ValueError on an empty (unrecorded) memblock.
        """
        if not self:
            raise ValueError("Empty memblock.")
        txs = tx_preprocess(self)
        if txs:
            return calc_stranding_feerate(txs, bootstrap=bootstrap)
        return None
    def __nonzero__(self):
        # Py2 truth value: a memblock is "true" once record_block has run.
        return self.entries is not None
    def __repr__(self):
        return "MemBlock(blockheight: %d, blocksize: %d, len(entries): %d)" % (
            self.blockheight, self.blocksize, len(self.entries))
    def __copy__(self):
        # Copying is only meaningful for plain MempoolState snapshots.
        raise NotImplementedError
class MemBlock(BaseMemBlock):
    '''The mempool state at the time a block was discovered.

    Persistence in the current (normalized) DB format: tx rows are shared
    across blocks and only "removed" by stamping heightremoved; blocktxs
    rows link a block to its tx rows.
    '''
    def write(self, dbfile, blocks_to_keep):
        '''Write MemBlock to disk.
        blocks_to_keep specifies how many blocks of information should be
        retained. All MemBlocks older (with respect to this block) than
        blocks_to_keep will be deleted.

        Raises ValueError if the memblock is empty (not yet recorded).
        '''
        if not self:
            raise ValueError("Failed write: empty memblock.")
        # Temp tables
        NONREMOVED = "nonremoved"
        MEMBLOCKTXS = "memblocktxs"
        db = None
        memblocktxids = self.entries.keys()
        try:
            with db_lock:
                db = sqlite3.connect(dbfile)
                # Create schema/indexes on first use (no-ops afterwards).
                for key, val in MEMBLOCK_SCHEMA.items():
                    db.execute('CREATE TABLE IF NOT EXISTS %s (%s)' %
                               (key, ','.join(val)))
                db.execute('CREATE INDEX IF NOT EXISTS heightidx '
                           'ON txs (heightremoved)')
                db.execute('CREATE INDEX IF NOT EXISTS block_heightidx '
                           'ON blocktxs (blockheight)')
                # Enter into blocks
                db.execute(
                    'INSERT INTO blocks VALUES (?,?,?)',
                    (self.blockheight, self.blocksize, self.time))
                # Temporary tables for data manipulation
                db.execute(
                    "CREATE TEMP TABLE {} (id INTEGER, txid TEXT)".
                    format(NONREMOVED))
                db.execute(
                    "CREATE TEMP TABLE {} "
                    "(txid TEXT, isconflict INTEGER, inblock INTEGER)".
                    format(MEMBLOCKTXS))
                # Fetch the nonremoved txs
                db.execute(
                    "INSERT INTO {} "
                    "SELECT id, txid FROM txs "
                    "WHERE heightremoved IS NULL".format(NONREMOVED)
                )
                # Table the memblocktxs
                db.executemany(
                    "INSERT INTO {} VALUES (?,?,?)".format(MEMBLOCKTXS),
                    [(txid,
                      self.entries[txid].isconflict,
                      self.entries[txid].inblock)
                     for txid in memblocktxids])
                # Update the heightremoved: a tx row is retired when it was
                # conflicted, included in this block, or is no longer in the
                # mempool at all (inblock NULL after the join).
                db.execute(
                    "UPDATE txs SET heightremoved=? "
                    "WHERE id IN "
                    "(SELECT id FROM {0} LEFT JOIN {1} "
                    " ON {0}.txid={1}.txid WHERE "
                    " {1}.isconflict=1 OR "
                    " {1}.inblock=1 OR "
                    " {1}.inblock is NULL)".
                    format(NONREMOVED, MEMBLOCKTXS),
                    (self.blockheight,)
                )
                # Get the new txs to table
                txidstoenter = db.execute(
                    "SELECT txid FROM {} EXCEPT SELECT txid FROM {}".
                    format(MEMBLOCKTXS, NONREMOVED)
                )
                txstoenter = [
                    (
                        txid,
                        self.entries[txid].size,
                        str(self.entries[txid].fee),
                        str(self.entries[txid].startingpriority),
                        self.entries[txid].time,
                        self.entries[txid].height,
                        ','.join(self.entries[txid].depends),
                        self.entries[txid].feerate,
                        self.blockheight if (
                            self.entries[txid].isconflict or
                            self.entries[txid].inblock)
                        else None
                    )
                    for txid in map(itemgetter(0), txidstoenter)
                ]
                # Enter new txs. There might be duplicate txid,
                # but that's OK!
                db.executemany(
                    "INSERT INTO txs(txid, size, fee, startingpriority, "
                    "time, height, depends, feerate, heightremoved) "
                    "VALUES (?,?,?,?,?,?,?,?,?)", txstoenter)
                # Get the rowids, to enter into blocktxs
                finaltxs = db.execute(
                    "SELECT id, txid FROM txs WHERE "
                    "heightremoved IS NULL OR "
                    "heightremoved=?",
                    (self.blockheight,)
                ).fetchall()
                rowidmap = {txid: rowid for rowid, txid in finaltxs}
                # Assert that there are no duplicate txids
                assert len(finaltxs) == len(set(map(itemgetter(1), finaltxs)))
                # Enter into blocktxs
                blocktxstoenter = [(
                    self.blockheight,
                    rowidmap[txid],
                    str(self.entries[txid].currentpriority),
                    self.entries[txid].isconflict,
                    self.entries[txid].inblock)
                    for txid in memblocktxids
                ]
                db.executemany("INSERT INTO blocktxs VALUES (?,?,?,?,?)",
                               blocktxstoenter)
                # Remove old blocks
                if blocks_to_keep > 0:
                    height_thresh = self.blockheight - blocks_to_keep
                    db.execute("DELETE FROM txs WHERE heightremoved<=?",
                               (height_thresh,))
                    db.execute("DELETE FROM blocks WHERE height<=?",
                               (height_thresh,))
                    db.execute("DELETE FROM blocktxs WHERE blockheight<=?",
                               (height_thresh,))
                # Single commit: the whole write is one transaction.
                db.commit()
        finally:
            if db is not None:
                db.close()
    @classmethod
    def read(cls, blockheight, dbfile=MEMBLOCK_DBFILE):
        '''Read MemBlock from disk.
        Returns the memblock with specified blockheight.
        Returns None if no record exists for that block.
        Raises one of the sqlite3 errors if there are other problems.
        '''
        if not os.path.exists(dbfile):
            return None
        db = None
        try:
            db = sqlite3.connect(dbfile)
            with db_lock:
                block = db.execute('SELECT size, time FROM blocks '
                                   'WHERE height=?',
                                   (blockheight,)).fetchall()
                txlist = db.execute(
                    "SELECT "
                    "    txid,"
                    "    size,"
                    "    fee,"
                    "    startingpriority,"
                    "    currentpriority,"
                    "    time,"
                    "    height,"
                    "    depends,"
                    "    feerate,"
                    "    isconflict,"
                    "    inblock "
                    "FROM blocktxs LEFT JOIN txs ON blocktxs.txrowid=txs.id "
                    "WHERE blockheight=?",
                    (blockheight,)).fetchall()
        finally:
            if db is not None:
                db.close()
        # Make sure there are no missing txs.
        txids = map(itemgetter(0), txlist)
        assert not any([txid is None for txid in txids])
        if block:
            blocksize, blocktime = block[0]
        else:
            return None
        memblock = cls()
        memblock.height = blockheight - 1
        entries = {}
        for tx in txlist:
            # Rebuild each MemEntry from its DB row (see SELECT column order).
            entry = MemEntry()
            entry.size = tx[1]
            entry.fee = decimal.Decimal(tx[2])
            entry.startingpriority = decimal.Decimal(tx[3])
            entry.currentpriority = decimal.Decimal(tx[4])
            entry.time = tx[5]
            entry.height = tx[6]
            entry.depends = tx[7].split(",") if tx[7] else []
            # We need to do this because depends is recorded upon first sight
            # of the tx; some deps might have confirmed in the meantime
            entry.depends = filter(lambda dep: dep in txids, entry.depends)
            entry.feerate = tx[8]
            entry.isconflict = bool(tx[9])
            entry.inblock = bool(tx[10])
            entry.leadtime = blocktime - tx[5]
            entries[tx[0]] = entry
        memblock.entries = entries
        memblock.time = blocktime
        memblock.blockheight = blockheight
        memblock.blocksize = blocksize
        return memblock
    @staticmethod
    def get_heights(blockrangetuple=None, dbfile=MEMBLOCK_DBFILE):
        '''Get the list of MemBlocks stored on disk.
        Returns a list of heights of all MemBlocks on disk within
        range(*blockrangetuple)
        '''
        if not os.path.exists(dbfile):
            return []
        if blockrangetuple is None:
            # Default: no restriction on the height range.
            blockrangetuple = (0, float("inf"))
        db = None
        try:
            db = sqlite3.connect(dbfile)
            with db_lock:
                heights = db.execute(
                    'SELECT height FROM blocks '
                    'where height>=? and height <?',
                    blockrangetuple).fetchall()
        finally:
            if db is not None:
                db.close()
        return [r[0] for r in heights]
# TODO: Remove this when transition to new db is complete.
class OldMemBlock(BaseMemBlock):
    '''The mempool state at the time a block was discovered.

    Legacy (denormalized) DB format: one txs row per (block, txid),
    serialized via MemEntry._get_attr_tuple/_from_attr_tuple.
    '''
    def write(self, dbfile, blocks_to_keep):
        '''Write MemBlock to disk.
        blocks_to_keep specifies how many blocks of information should be
        retained. All MemBlocks older (with respect to this block) than
        blocks_to_keep will be deleted.

        Raises ValueError if the memblock is empty (not yet recorded).
        '''
        if not self:
            raise ValueError("Failed write: empty memblock.")
        db = None
        try:
            db = sqlite3.connect(dbfile)
            # Create schema/indexes on first use (no-ops afterwards).
            for key, val in OLD_MEMBLOCK_TABLE_SCHEMA.items():
                db.execute('CREATE TABLE IF NOT EXISTS %s (%s)' %
                           (key, ','.join(val)))
            db.execute('CREATE INDEX IF NOT EXISTS heightidx '
                       'ON txs (blockheight)')
            db.execute('CREATE INDEX IF NOT EXISTS blocks_heightidx '
                       'ON blocks (height)')
            with db_lock:
                # "with db" wraps each group of statements in a transaction.
                with db:
                    db.execute(
                        'INSERT INTO blocks VALUES (?,?,?)',
                        (self.blockheight, self.blocksize, self.time))
                    db.executemany(
                        'INSERT INTO txs VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)',
                        [(self.blockheight, txid) + entry._get_attr_tuple()
                         for txid, entry in self.entries.iteritems()])
                if blocks_to_keep > 0:
                    height_thresh = self.blockheight - blocks_to_keep
                    with db:
                        db.execute('DELETE FROM blocks WHERE height<=?',
                                   (height_thresh,))
                        db.execute('DELETE FROM txs WHERE blockheight<=?',
                                   (height_thresh,))
        finally:
            if db is not None:
                db.close()
    @classmethod
    def read(cls, blockheight, dbfile=MEMBLOCK_DBFILE):
        '''Read MemBlock from disk.
        Returns the memblock with specified blockheight.
        Returns None if no record exists for that block.
        Raises one of the sqlite3 errors if there are other problems.
        '''
        if not os.path.exists(dbfile):
            return None
        db = None
        try:
            db = sqlite3.connect(dbfile)
            with db_lock:
                block = db.execute('SELECT size, time FROM blocks '
                                   'WHERE height=?',
                                   (blockheight,)).fetchall()
                txlist = db.execute('SELECT * FROM txs WHERE blockheight=?',
                                    (blockheight,)).fetchall()
        finally:
            if db is not None:
                db.close()
        if block:
            blocksize, blocktime = block[0]
        else:
            return None
        memblock = cls()
        memblock.height = blockheight - 1
        # Row layout: (blockheight, txid, *attr_tuple) — see write().
        memblock.entries = {
            tx[1]: MemEntry._from_attr_tuple(tx[2:]) for tx in txlist}
        memblock.time = blocktime
        memblock.blockheight = blockheight
        memblock.blocksize = blocksize
        return memblock
    @staticmethod
    def get_heights(blockrangetuple=None, dbfile=MEMBLOCK_DBFILE):
        '''Get the list of MemBlocks stored on disk.
        Returns a list of heights of all MemBlocks on disk within
        range(*blockrangetuple)
        '''
        if not os.path.exists(dbfile):
            return []
        if blockrangetuple is None:
            # Default: no restriction on the height range.
            blockrangetuple = (0, float("inf"))
        db = None
        try:
            db = sqlite3.connect(dbfile)
            with db_lock:
                heights = db.execute(
                    'SELECT height FROM blocks '
                    'where height>=? and height <?',
                    blockrangetuple).fetchall()
        finally:
            if db is not None:
                db.close()
        return [r[0] for r in heights]
class MemEntry(SimEntry):
    '''Represents a mempool entry.
    This is basically the data returned by getrawmempool, but with additional
    attributes if it is associated with a MemBlock:
        inblock - whether or not the transaction was included in the block
        leadtime - difference between block discovery time and mempool entry
                   time
        isconflict - whether or not the transaction is a conflict, meaning
                     that it was subsequently removed from the mempool as a
                     result of being invalidated by some other transaction
                     in the block.
    In addition, for convenience we compute and store the feerate (satoshis
    per kB of transaction size).
    Also, care is taken not to mutate the rawmempool_entry input.
    '''
    def __init__(self):
        # Start from a blank SimEntry; fields are filled in later either
        # from getrawmempool (from_rawentry) or from disk (_from_attr_tuple).
        super(MemEntry, self).__init__(None, None)
        self.fee = None
        self.startingpriority = None
        self.currentpriority = None
        self.time = None
        self.height = None
        # The next three remain None until the entry is associated with a
        # MemBlock (see BaseMemBlock.record_block).
        self.leadtime = None
        self.isconflict = None
        self.inblock = None
    def is_high_priority(self):
        '''Check if entry is high priority.
        Returns True if entry is considered high priority by Bitcoin Core
        with regard to priority inclusion in the next block.
        Ideally this should simply return
        (entry.currentpriority > prioritythresh), however, currentpriority,
        as obtained by RPC, uses the current height, whereas miners in forming
        a new block use the current height + 1, i.e. the height of the new
        block. So currentpriority underestimates the 'true' mining priority.
        (There are other complications, in that currentpriority doesn't take
        into account cases where the entry has mempool dependencies, but
        that's a different problem, which we live with for now.)
        This difference is important because, for the purposes of minfeerate
        policy estimation, we need to properly exclude all high priority
        transactions. Previously in v0.9 of Bitcoin Core, this wasn't such a
        big problem, because low priority transactions below minrelaytxfee
        are still relayed / enter the mempool; there are thus sufficient
        low-fee, low-priority transactions so that the minfeerate threshold
        is still estimatable in a consistent manner.
        With v0.10, however, only high (miners') priority transactions are
        allowed into the mempool if the tx has low fee. If one relies on the
        criteria (entry.currentpriority > prioritythresh), there will be false
        negatives; however because there aren't any more truly low-priority
        transactions with similar feerate, the minfeerate estimation can
        become inconsistent.
        It's not possible, however, to adjust entry.currentpriority to become
        the miners' priority, solely from the information obtained from
        getrawmempool. Therefore, we resort to this hack: the entry is classed
        as high priority if (entry.currentpriority > prioritythresh) or
        (entry.feerate < minrelaytxfee).
        '''
        return (self.currentpriority > PRIORITYTHRESH or
                self.feerate < MINRELAYTXFEE)
    # TODO: deprecate this
    def _get_attr_tuple(self):
        '''Get tuple of attributes.
        Used when writing MemBlock to disk.

        Raises ValueError if the entry was never associated with a MemBlock.
        '''
        for attr in ['leadtime', 'isconflict', 'inblock']:
            if getattr(self, attr) is None:
                raise ValueError("MemEntry not yet processed with MemBlock.")
        # NOTE: field order must stay in sync with _from_attr_tuple and the
        # legacy txs table layout in OldMemBlock.
        attr_tuple = (
            self.size,
            str(self.fee),
            str(self.startingpriority),
            str(self.currentpriority),
            self.time,
            self.height,
            ','.join(self.depends),
            self.feerate,
            self.leadtime,
            self.isconflict,
            self.inblock
        )
        return attr_tuple
    # TODO: deprecate this
    @classmethod
    def _from_attr_tuple(cls, tup):
        '''Return MemEntry from attribute tuple.
        Used when reading MemBlock from disk.
        '''
        entry = cls()
        (
            entry.size,
            entry.fee,
            entry.startingpriority,
            entry.currentpriority,
            entry.time,
            entry.height,
            entry.depends,
            entry.feerate,
            entry.leadtime,
            entry.isconflict,
            entry.inblock
        ) = tup
        # Decimals, the depends list and the booleans were flattened to
        # strings/ints for storage; restore the rich types here.
        entry.fee = decimal.Decimal(entry.fee)
        entry.currentpriority = decimal.Decimal(entry.currentpriority)
        entry.startingpriority = decimal.Decimal(entry.startingpriority)
        entry.depends = entry.depends.split(',') if entry.depends else []
        entry.isconflict = bool(entry.isconflict)
        entry.inblock = bool(entry.inblock)
        return entry
    @classmethod
    def from_rawentry(cls, rawentry):
        '''Return MemEntry from rawmempool dict.
        rawentry is a value in the dict returned by
        proxy.getrawmempool(verbose=True).
        '''
        entry = cls()
        for attr in rawentry:
            setattr(entry, attr, rawentry[attr])
        # Copy the depends list so we never mutate the caller's rawentry.
        entry.depends = entry.depends[:]
        entry.feerate = get_feerate(rawentry)
        return entry
    def __copy__(self):
        cpy = MemEntry()
        for attr in self.__dict__:
            setattr(cpy, attr, getattr(self, attr))
        # depends is the only mutable attribute; give the copy its own list.
        cpy.depends = cpy.depends[:]
        return cpy
    def __repr__(self):
        return str(self.__dict__)
    def __eq__(self, other):
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return self.__dict__ != other.__dict__
def get_mempool_state():
    """Poll the node for a fresh MempoolState snapshot.

    The poll duration is logged at debug level, and escalated to a warning
    when it exceeds 15 seconds.
    """
    poll_start = time()
    state = MempoolState(*proxy.poll_mempool())
    elapsedtime = time() - poll_start
    time_msg = "get_mempool_state took {}s.".format(elapsedtime)
    logger.debug(time_msg)
    if elapsedtime > 15:
        logger.warning(time_msg)
    return state
|
mit
| 5,300,208,427,413,970,000
| 35.809195
| 79
| 0.548589
| false
| 4.460167
| false
| false
| false
|
mvaled/sentry
|
src/sentry/reprocessing.py
|
1
|
2078
|
from __future__ import absolute_import
import uuid
# Project option key under which the current reprocessing revision is stored.
REPROCESSING_OPTION = "sentry:processing-rev"
def get_reprocessing_revision(project, cached=True):
    """Return the current revision of the project's reprocessing config.

    With ``cached=True`` the value comes from the option cache; otherwise
    the database is queried directly and ``None`` is returned when no
    revision has been stored yet.
    """
    from sentry.models import ProjectOption, Project
    if cached:
        return ProjectOption.objects.get_value(project, REPROCESSING_OPTION)
    # Uncached path queries by raw project id.
    if isinstance(project, Project):
        project = project.id
    try:
        option = ProjectOption.objects.get(
            project=project, key=REPROCESSING_OPTION)
    except ProjectOption.DoesNotExist:
        return None
    return option.value
def bump_reprocessing_revision(project):
    """Store a fresh random revision for *project* and return it."""
    from sentry.models import ProjectOption
    new_rev = uuid.uuid4().hex
    ProjectOption.objects.set_value(project, REPROCESSING_OPTION, new_rev)
    return new_rev
def report_processing_issue(event_data, scope, object=None, type=None, data=None):
    """Record a processing issue on *event_data*.

    Per scope/object combination only one issue is kept — the last one
    reported wins. ``object`` defaults to the wildcard "*" and ``type`` to
    ``EventError.INVALID_DATA``.
    """
    if object is None:
        object = "*"
    if type is None:
        # Imported lazily so the default is only resolved when needed.
        from sentry.models import EventError
        type = EventError.INVALID_DATA
    issues = event_data.setdefault("processing_issues", {})
    issues["%s:%s" % (scope, object)] = {
        "scope": scope,
        "object": object,
        "type": type,
        "data": data,
    }
def resolve_processing_issue(project, scope, object=None, type=None):
    """Mark matching processing issues as resolved.

    Given a project, scope and object (and optionally a type), resolves the
    affected issues, which kicks off a task moving events back to
    reprocessing. ``object=None`` means the wildcard "*".
    """
    from sentry.models import ProcessingIssue
    if object is None:
        object = "*"
    ProcessingIssue.objects.resolve_processing_issue(
        project=project, scope=scope, object=object, type=type
    )
def trigger_reprocessing(project):
    """Asynchronously kick off reprocessing of the project's events."""
    from sentry.tasks.reprocessing import reprocess_events
    reprocess_events.delay(project_id=project.id)
|
bsd-3-clause
| -1,081,322,319,088,191,900
| 29.115942
| 88
| 0.689124
| false
| 4.131213
| false
| false
| false
|
patpatpatpatpat/pycolfin
|
pycolfin/cli.py
|
1
|
1716
|
# -*- coding: utf-8 -*-
import os
from getpass import getpass
import click
from .pycolfin import COLFin
# --help text for the -v/--verbosity option; levels are cumulative.
verbosity_help = """
1 = User ID, Last Login
2 = Display all info from 1 and portfolio summary
3 = Display all info in 1 & 2 and detailed portfolio
"""
# --help text for the --use-env-vars flag.
use_env_vars_help = """
Use USER_ID and PASSWORD from environment variables.
Not recommended if you are using a shared computer!
(This is like storing bank credentials in a text file)
"""
@click.command()
@click.option('--use-env-vars', is_flag=True, default=False, help=use_env_vars_help)
@click.option('-v', '--verbosity', default=3, type=click.IntRange(1, 3), help=verbosity_help)
def main(verbosity, use_env_vars):
    """Fetch and display a COL Financial account summary and portfolio.

    Credentials come from the USER_ID/PASSWORD environment variables when
    --use-env-vars is set; otherwise they are prompted for with hidden input.
    """
    if use_env_vars:
        try:
            user_id = os.environ['USER_ID']
            password = os.environ['PASSWORD']
        except KeyError:
            click.echo('USER_ID and PASSWORD not found in environment variables!')
            # Fix: bare exit() exits with status 0 (success) and depends on
            # the site module; signal the failure with a nonzero code.
            raise SystemExit(1)
    else:
        user_id = getpass(prompt='User ID:')
        password = getpass(prompt='Password:')
    try:
        account = COLFin(user_id, password, parser='html.parser')
    except Exception as e:
        # Login/scrape failure: report and exit nonzero.
        click.echo(str(e))
        raise SystemExit(1)
    if verbosity >= 1:
        account.fetch_account_summary()
    if verbosity >= 2:
        account.fetch_portfolio_summary()
        account.show_portfolio_summary()
    if verbosity == 3:
        account.fetch_detailed_portfolio()
        # Stocks and mutual funds are reported independently: a failure in
        # one (e.g. the account holds none) must not hide the other.
        try:
            account.show_detailed_stocks()
        except Exception as e:
            click.echo(e)  # was print(e); use click.echo consistently
        try:
            account.show_detailed_mutual_fund()
        except Exception as e:
            click.echo(e)
    account.show_account_summary()
if __name__ == "__main__":
    main()
|
mit
| -6,550,584,323,671,788,000
| 26.677419
| 93
| 0.61655
| false
| 3.771429
| false
| false
| false
|
3324fr/spinalcordtoolbox
|
dev/sct_register_multimodal_old/msct_register.py
|
1
|
35723
|
#!/usr/bin/env python
#
#
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Tanguy Magnan
# Modified: 2015-07-29
#
# License: see the LICENSE.TXT
#=======================================================================================================================
#
import sys, commands  # NOTE: 'commands' is Python-2-only (removed in py3)
# Get path of the toolbox from the SCT_DIR environment variable
# (via a shell echo; empty string if the variable is unset).
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# Append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
from sct_register_multimodal import Paramreg
def register_slicereg2d_pointwise(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='seg', algo='slicereg2d_pointwise', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
                                  warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, verbose=0):
    """Slice-by-slice regularized registration by translation of two segmentations.
    First we estimate for each slice the translation vector by calculating the difference of position of the two centers of
    mass of the two segmentations. Then we remove outliers using the Median Absolute Deviation technique (MAD) and smooth
    the translation along the x and y axes using a moving-average hanning window. Eventually, we generate two warping fields
    (forward and inverse) resulting from this regularized registration technique.
    The segmentations must be of same size (otherwise generate_warping_field will not work for forward or inverse
    creation).
    input:
        fname_source: name of moving image (type: string)
        fname_dest: name of fixed image (type: string)
        window_length: size of window for moving average smoothing (type: int)
        paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
        warp_forward_out: name of output forward warp (type: string)
        warp_inverse_out: name of output inverse warp (type: string)
        factor: sensitivity factor for outlier detection (the higher the factor, the fewer detections) (type: int or float)
        verbose: display parameter (type: int, value: 0,1 or 2)
    output:
        creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
    """
    if paramreg.type != 'seg':
        # This algorithm relies on centers of mass, which only make sense
        # for binary segmentations.
        print '\nERROR: Algorithm slicereg2d_pointwise only operates for segmentation type.'
        sys.exit(2)
    else:
        from msct_register_regularized import register_seg, generate_warping_field
        from numpy import asarray
        from msct_smooth import smoothing_window, outliers_detection, outliers_completion
        # Calculate displacement (per-slice center-of-mass differences)
        x_disp, y_disp = register_seg(fname_source, fname_dest)
        # Change to array
        x_disp_a = asarray(x_disp)
        y_disp_a = asarray(y_disp)
        # Detect outliers (MAD-based, controlled by `factor`)
        mask_x_a = outliers_detection(x_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
        mask_y_a = outliers_detection(y_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
        # Replace value of outliers by linear interpolation using closest non-outlier points
        x_disp_a_no_outliers = outliers_completion(mask_x_a, verbose=0)
        y_disp_a_no_outliers = outliers_completion(mask_y_a, verbose=0)
        # Smooth results along z with a hanning moving-average window
        x_disp_smooth = smoothing_window(x_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose=verbose)
        y_disp_smooth = smoothing_window(y_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose=verbose)
        # Generate warping field
        generate_warping_field(fname_dest, x_disp_smooth, y_disp_smooth, fname=warp_forward_out) #name_warp= 'step'+str(paramreg.step)
        # Inverse warping field (negated displacements)
        generate_warping_field(fname_source, -x_disp_smooth, -y_disp_smooth, fname=warp_inverse_out)
def register_slicereg2d_translation(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='Translation', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
                                    fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
                                    ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
                                                              'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
    """Slice-by-slice regularized registration by translation of two images.
    We first register slice-by-slice the two images using antsRegistration in 2D. Then we remove outliers using the
    Median Absolute Deviation technique (MAD) and smooth the translations along the x and y axes using a moving-average
    hanning window. Eventually, we generate two warping fields (forward and inverse) resulting from this regularized
    registration technique.
    The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
    creation).
    input:
        fname_source: name of moving image (type: string)
        fname_dest: name of fixed image (type: string)
        window_length[optional]: size of window for moving average smoothing (type: int)
        paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
        fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
        warp_forward_out[optional]: name of output forward warp (type: string)
        warp_inverse_out[optional]: name of output inverse warp (type: string)
        factor[optional]: sensitivity factor for outlier detection (the higher the factor, the fewer detections)
          (type: int or float)
        remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
        verbose[optional]: display parameter (type: int, value: 0,1 or 2)
        ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
    output:
        creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
    """
    from msct_register_regularized import register_images, generate_warping_field
    from numpy import asarray
    from msct_smooth import smoothing_window, outliers_detection, outliers_completion
    # Calculate displacement (per-slice 2D translation from antsRegistration)
    x_disp, y_disp = register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
    # Change to array
    x_disp_a = asarray(x_disp)
    y_disp_a = asarray(y_disp)
    # Detect outliers (MAD-based, controlled by `factor`)
    mask_x_a = outliers_detection(x_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
    mask_y_a = outliers_detection(y_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
    # Replace value of outliers by linear interpolation using closest non-outlier points
    x_disp_a_no_outliers = outliers_completion(mask_x_a, verbose=0)
    y_disp_a_no_outliers = outliers_completion(mask_y_a, verbose=0)
    # Smooth results along z with a hanning moving-average window
    x_disp_smooth = smoothing_window(x_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose = verbose)
    y_disp_smooth = smoothing_window(y_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose = verbose)
    # Generate warping field
    generate_warping_field(fname_dest, x_disp_smooth, y_disp_smooth, fname=warp_forward_out)
    # Inverse warping field (negated displacements)
    generate_warping_field(fname_source, -x_disp_smooth, -y_disp_smooth, fname=warp_inverse_out)
def register_slicereg2d_rigid(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='Rigid', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
                              fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
                              ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
                                                        'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
    """Slice-by-slice regularized registration (rigid) of two images.
    We first register slice-by-slice the two images using antsRegistration in 2D. Then we remove outliers using the
    Median Absolute Deviation technique (MAD) and smooth the translations and angle of rotation along the x and y axes
    using a moving-average hanning window. Eventually, we generate two warping fields (forward and inverse) resulting
    from this regularized registration technique.
    The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
    creation).
    input:
        fname_source: name of moving image (type: string)
        fname_dest: name of fixed image (type: string)
        window_length[optional]: size of window for moving average smoothing (type: int)
        paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
        fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
        warp_forward_out[optional]: name of output forward warp (type: string)
        warp_inverse_out[optional]: name of output inverse warp (type: string)
        factor[optional]: sensitivity factor for outlier detection (the higher the factor, the fewer detections)
          (type: int or float)
        remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
        verbose[optional]: display parameter (type: int, value: 0,1 or 2)
        ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
    output:
        creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
    """
    from msct_register_regularized import register_images, generate_warping_field
    from numpy import asarray
    from msct_smooth import smoothing_window, outliers_detection, outliers_completion
    # Calculate displacement (per-slice translation + rotation angle)
    x_disp, y_disp, theta_rot = register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
    # Change to array
    x_disp_a = asarray(x_disp)
    y_disp_a = asarray(y_disp)
    theta_rot_a = asarray(theta_rot)
    # Detect outliers (MAD-based; note the rotation uses a fixed factor of 2)
    mask_x_a = outliers_detection(x_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
    mask_y_a = outliers_detection(y_disp_a, type='median', factor=factor, return_filtered_signal='no', verbose=verbose)
    mask_theta_a = outliers_detection(theta_rot_a, type='median', factor=2, return_filtered_signal='no', verbose=verbose)
    # Replace value of outliers by linear interpolation using closest non-outlier points
    x_disp_a_no_outliers = outliers_completion(mask_x_a, verbose=0)
    y_disp_a_no_outliers = outliers_completion(mask_y_a, verbose=0)
    theta_rot_a_no_outliers = outliers_completion(mask_theta_a, verbose=0)
    # Smooth results along z with a hanning moving-average window
    x_disp_smooth = smoothing_window(x_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose = verbose)
    y_disp_smooth = smoothing_window(y_disp_a_no_outliers, window_len=int(window_length), window='hanning', verbose = verbose)
    theta_rot_smooth = smoothing_window(theta_rot_a_no_outliers, window_len=int(window_length), window='hanning', verbose = verbose)
    # Generate warping field
    generate_warping_field(fname_dest, x_disp_smooth, y_disp_smooth, theta_rot_smooth, fname=warp_forward_out)
    # Inverse warping field (negated displacements and rotation)
    generate_warping_field(fname_source, -x_disp_smooth, -y_disp_smooth, -theta_rot_smooth, fname=warp_inverse_out)
def register_slicereg2d_affine(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='Affine', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
                               fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
                               ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
                                                         'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
    """Slice-by-slice regularized registration (affine) of two images.

    We first register slice-by-slice the two images using antsRegistration in 2D (algo: affine) and create 3D warping
    fields (forward and inverse) by merging the 2D warping fields along z. Then we directly detect outliers and smooth
    the 3d warping fields applying a moving average hanning window on each pixel of the plan xOy (i.e. we consider that
    for a position (x,y) in the plan xOy, the variation along z of the vector of displacement (xo, yo, zo) of the
    warping field should not be too abrupt). Eventually, we generate two warping fields (forward and inverse) resulting
    from this regularized registration technique.
    The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
    creation).
    input:
        fname_source: name of moving image (type: string)
        fname_dest: name of fixed image (type: string)
        window_length[optional]: size of window for moving average smoothing (type: int)
        paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
        fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
        warp_forward_out[optional]: name of output forward warp (type: string)
        warp_inverse_out[optional]: name of output inverse warp (type: string)
        factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
            (type: int or float)
        remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
        verbose[optional]: display parameter (type: int, value: 0,1 or 2)
        ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
    output:
        creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
    """
    # NOTE(review): mutable default arguments (Paramreg instance, dict) are shared
    # across calls -- safe only as long as callers never mutate them. TODO confirm.
    from nibabel import load, Nifti1Image, save
    from msct_smooth import smoothing_window, outliers_detection, outliers_completion
    from msct_register_regularized import register_images
    from numpy import apply_along_axis, zeros
    import sct_utils as sct
    # Warp files produced by register_images() are named after the step number.
    name_warp_syn = 'Warp_total_step_'+str(paramreg.step)  # 'Warp_total'
    # Calculate displacement: slice-by-slice 2D registration. Presumably writes the
    # forward/inverse warp files named above to the current directory -- TODO confirm.
    register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
    print'\nRegularizing warping fields along z axis...'
    print'\n\tSplitting warping fields ...'
    # sct.run('isct_c3d -mcs ' + name_warp_syn + '.nii.gz -oo ' + name_warp_syn + '_x.nii.gz ' + name_warp_syn + '_y.nii.gz')
    # sct.run('isct_c3d -mcs ' + name_warp_syn + '_inverse.nii.gz -oo ' + name_warp_syn + '_x_inverse.nii.gz ' + name_warp_syn + '_y_inverse.nii.gz')
    # Split each multi-component warping field into separate x- and y-displacement volumes.
    sct.run('sct_maths -i ' + name_warp_syn + '.nii.gz -w -mcs -o ' + name_warp_syn + '_x.nii.gz,' + name_warp_syn + '_y.nii.gz')
    sct.run('sct_maths -i ' + name_warp_syn + '_inverse.nii.gz -w -mcs -o ' + name_warp_syn + '_x_inverse.nii.gz,' + name_warp_syn + '_y_inverse.nii.gz')
    data_warp_x = load(name_warp_syn + '_x.nii.gz').get_data()
    data_warp_y = load(name_warp_syn + '_y.nii.gz').get_data()
    hdr_warp = load(name_warp_syn + '_x.nii.gz').get_header()
    data_warp_x_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_data()
    data_warp_y_inverse = load(name_warp_syn + '_y_inverse.nii.gz').get_data()
    hdr_warp_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_header()
    # Outliers deletion: flag aberrant displacement values along z (last axis),
    # independently for every (x, y) pixel column.
    print'\n\tDeleting outliers...'
    mask_x_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x)
    mask_y_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y)
    mask_x_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x_inverse)
    mask_y_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y_inverse)
    # Outliers replacement by linear interpolation using closest non-outlier points
    data_warp_x_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_a)
    data_warp_y_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_a)
    data_warp_x_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_inverse_a)
    data_warp_y_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_inverse_a)
    # Smoothing of results along z with a hanning moving-average window.
    print'\n\tSmoothing results...'
    data_warp_x_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_no_outliers)
    data_warp_x_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_inverse_no_outliers)
    data_warp_y_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_no_outliers)
    data_warp_y_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_inverse_no_outliers)
    print'\nSaving regularized warping fields...'
    '''
    from sct_maths import multicomponent_merge
    from msct_image import Image
    data_warp_smooth = multicomponent_merge([data_warp_x_smooth, data_warp_y_smooth])[0]
    hdr_warp.set_intent('vector', (), '')
    warp_smooth = Image(param=data_warp_smooth, absolutepath=warp_forward_out, hdr=hdr_warp)
    warp_smooth.save()
    data_warp_smooth_inverse = multicomponent_merge([data_warp_x_smooth_inverse, data_warp_y_smooth_inverse])[0]
    hdr_warp_inverse.set_intent('vector', (), '')
    warp_smooth_inverse = Image(param=data_warp_smooth_inverse, absolutepath=warp_inverse_out, hdr=hdr_warp_inverse)
    warp_smooth_inverse.save()
    '''
    # Get image dimensions of destination image
    from msct_image import Image
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
    # Reassemble the smoothed x/y components into 5D (x, y, z, t, vector) arrays;
    # the third vector component (z displacement) is left at zero.
    data_warp_smooth = zeros(((((nx, ny, nz, 1, 3)))))
    data_warp_smooth[:,:,:,0,0] = data_warp_x_smooth
    data_warp_smooth[:,:,:,0,1] = data_warp_y_smooth
    data_warp_smooth_inverse = zeros(((((nx, ny, nz, 1, 3)))))
    data_warp_smooth_inverse[:,:,:,0,0] = data_warp_x_smooth_inverse
    data_warp_smooth_inverse[:,:,:,0,1] = data_warp_y_smooth_inverse
    # Force header's parameter to intent so that the file may be recognised as a warping field by ants
    hdr_warp.set_intent('vector', (), '')
    hdr_warp_inverse.set_intent('vector', (), '')
    img = Nifti1Image(data_warp_smooth, None, header=hdr_warp)
    img_inverse = Nifti1Image(data_warp_smooth_inverse, None, header=hdr_warp_inverse)
    save(img, filename=warp_forward_out)
    print'\tFile ' + warp_forward_out + ' saved.'
    save(img_inverse, filename=warp_inverse_out)
    print'\tFile ' + warp_inverse_out + ' saved.'
def register_slicereg2d_syn(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='SyN', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
"""Slice-by-slice regularized registration (syn) of two images.
We first register slice-by-slice the two images using antsRegistration in 2D (algo: syn) and create 3D warping
fields (forward and inverse) by merging the 2D warping fields along z. Then we directly detect outliers and smooth
the 3d warping fields applying a moving average hanning window on each pixel of the plan xOy (i.e. we consider that
for a position (x,y) in the plan xOy, the variation along z of the vector of displacement (xo, yo, zo) of the
warping field should not be too abrupt). Eventually, we generate two warping fields (forward and inverse) resulting
from this regularized registration technique.
The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
creation).
input:
fname_source: name of moving image (type: string)
fname_dest: name of fixed image (type: string)
window_length[optional]: size of window for moving average smoothing (type: int)
paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
warp_forward_out[optional]: name of output forward warp (type: string)
warp_inverse_out[optional]: name of output inverse warp (type: string)
factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
(type: int or float)
remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
verbose[optional]: display parameter (type: int, value: 0,1 or 2)
ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
output:
creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
"""
from nibabel import load, Nifti1Image, save
from msct_smooth import smoothing_window, outliers_detection, outliers_completion
from msct_register_regularized import register_images
from numpy import apply_along_axis, zeros
import sct_utils as sct
name_warp_syn = 'Warp_total'
# Registrating images
register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
print'\nRegularizing warping fields along z axis...'
print'\n\tSplitting warping fields ...'
# sct.run('isct_c3d -mcs ' + name_warp_syn + '.nii.gz -oo ' + name_warp_syn + '_x.nii.gz ' + name_warp_syn + '_y.nii.gz')
# sct.run('isct_c3d -mcs ' + name_warp_syn + '_inverse.nii.gz -oo ' + name_warp_syn + '_x_inverse.nii.gz ' + name_warp_syn + '_y_inverse.nii.gz')
sct.run('sct_maths -i ' + name_warp_syn + '.nii.gz -w -mcs -o ' + name_warp_syn + '_x.nii.gz,' + name_warp_syn + '_y.nii.gz')
sct.run('sct_maths -i ' + name_warp_syn + '_inverse.nii.gz -w -mcs -o ' + name_warp_syn + '_x_inverse.nii.gz,' + name_warp_syn + '_y_inverse.nii.gz')
im_warp_x = Image(name_warp_syn + '_x.nii.gz')
data_warp_x = im_warp_x.data
im_warp_y = Image(name_warp_syn + '_y.nii.gz')
data_warp_y = im_warp_y.data
hdr_warp = im_warp_x.hdr
# data_warp_x = load(name_warp_syn + '_x.nii.gz').get_data()
# data_warp_y = load(name_warp_syn + '_y.nii.gz').get_data()
# hdr_warp = load(name_warp_syn + '_x.nii.gz').get_header()
im_warp_x_inverse = Image(name_warp_syn + '_x_inverse.nii.gz')
data_warp_x_inverse = im_warp_x_inverse.data
im_warp_y_inverse = Image(name_warp_syn + '_y_inverse.nii.gz')
data_warp_y_inverse = im_warp_y_inverse.data
hdr_warp_inverse = im_warp_x_inverse.hdr
# data_warp_x_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_data()
# data_warp_y_inverse = load(name_warp_syn + '_y_inverse.nii.gz').get_data()
# hdr_warp_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_header()
#Outliers deletion
print'\n\tDeleting outliers...'
mask_x_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x)
mask_y_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y)
mask_x_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x_inverse)
mask_y_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y_inverse)
#Outliers replacement by linear interpolation using closest non-outlier points
data_warp_x_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_a)
data_warp_y_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_a)
data_warp_x_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_inverse_a)
data_warp_y_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_inverse_a)
#Smoothing of results along z
print'\n\tSmoothing results...'
data_warp_x_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_no_outliers)
data_warp_x_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_inverse_no_outliers)
data_warp_y_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_no_outliers)
data_warp_y_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_inverse_no_outliers)
print'\nSaving regularized warping fields...'
#Get image dimensions of destination image
from msct_image import Image
nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
data_warp_smooth = zeros(((((nx, ny, nz, 1, 3)))))
data_warp_smooth[:,:,:,0,0] = data_warp_x_smooth
data_warp_smooth[:,:,:,0,1] = data_warp_y_smooth
data_warp_smooth_inverse = zeros(((((nx, ny, nz, 1, 3)))))
data_warp_smooth_inverse[:,:,:,0,0] = data_warp_x_smooth_inverse
data_warp_smooth_inverse[:,:,:,0,1] = data_warp_y_smooth_inverse
# Force header's parameter to intent so that the file may be recognised as a warping field by ants
hdr_warp.set_intent('vector', (), '')
hdr_warp_inverse.set_intent('vector', (), '')
img = Nifti1Image(data_warp_smooth, None, header=hdr_warp)
img_inverse = Nifti1Image(data_warp_smooth_inverse, None, header=hdr_warp_inverse)
save(img, filename=warp_forward_out)
print'\tFile ' + warp_forward_out + ' saved.'
save(img_inverse, filename=warp_inverse_out)
print'\tFile ' + warp_inverse_out + ' saved.'
def register_slicereg2d_bsplinesyn(fname_source, fname_dest, window_length=31, paramreg=Paramreg(step='0', type='im', algo='BSplineSyN', metric='MeanSquares', iter='10', shrink='1', smooth='0', gradStep='0.5'),
                                   fname_mask='', warp_forward_out='step0Warp.nii.gz', warp_inverse_out='step0InverseWarp.nii.gz', factor=2, remove_temp_files=1, verbose=0,
                                   ants_registration_params={'rigid': '', 'affine': '', 'compositeaffine': '', 'similarity': '', 'translation': '','bspline': ',10', 'gaussiandisplacementfield': ',3,0',
                                                             'bsplinedisplacementfield': ',5,10', 'syn': ',3,0', 'bsplinesyn': ',1,3'}):
    """Slice-by-slice regularized registration (bsplinesyn) of two images.

    We first register slice-by-slice the two images using antsRegistration in 2D (algo: bsplinesyn) and create 3D warping
    fields (forward and inverse) by merging the 2D warping fields along z. Then we directly detect outliers and smooth
    the 3d warping fields applying a moving average hanning window on each pixel of the plan xOy (i.e. we consider that
    for a position (x,y) in the plan xOy, the variation along z of the vector of displacement (xo, yo, zo) of the
    warping field should not be too abrupt). Eventually, we generate two warping fields (forward and inverse) resulting
    from this regularized registration technique.
    The images must be of same size (otherwise generate_warping_field will not work for forward or inverse
    creation).
    input:
        fname_source: name of moving image (type: string)
        fname_dest: name of fixed image (type: string)
        window_length[optional]: size of window for moving average smoothing (type: int)
        paramreg[optional]: parameters of antsRegistration (type: Paramreg class from sct_register_multimodal)
        fname_mask[optional]: name of mask file (type: string) (parameter -x of antsRegistration)
        warp_forward_out[optional]: name of output forward warp (type: string)
        warp_inverse_out[optional]: name of output inverse warp (type: string)
        factor[optional]: sensibility factor for outlier detection (higher the factor, smaller the detection)
            (type: int or float)
        remove_temp_files[optional]: 1 to remove, 0 to keep (type: int)
        verbose[optional]: display parameter (type: int, value: 0,1 or 2)
        ants_registration_params[optional]: specific algorithm's parameters for antsRegistration (type: dictionary)
    output:
        creation of warping field files of name 'warp_forward_out' and 'warp_inverse_out'.
    """
    from nibabel import load, Nifti1Image, save
    from msct_smooth import smoothing_window, outliers_detection, outliers_completion
    from msct_register_regularized import register_images
    from numpy import apply_along_axis, zeros
    import sct_utils as sct
    from msct_image import Image
    name_warp_syn = 'Warp_total'
    # Registrating images: slice-by-slice 2D registration. Presumably writes the
    # forward/inverse 'Warp_total*' files consumed below -- TODO confirm.
    register_images(fname_source, fname_dest, mask=fname_mask, paramreg=paramreg, remove_tmp_folder=remove_temp_files, ants_registration_params=ants_registration_params)
    print'\nRegularizing warping fields along z axis...'
    print'\n\tSplitting warping fields ...'
    # sct.run('isct_c3d -mcs ' + name_warp_syn + '.nii.gz -oo ' + name_warp_syn + '_x.nii.gz ' + name_warp_syn + '_y.nii.gz')
    # sct.run('isct_c3d -mcs ' + name_warp_syn + '_inverse.nii.gz -oo ' + name_warp_syn + '_x_inverse.nii.gz ' + name_warp_syn + '_y_inverse.nii.gz')
    # Split each multi-component warping field into separate x- and y-displacement volumes.
    sct.run('sct_maths -i ' + name_warp_syn + '.nii.gz -w -mcs -o ' + name_warp_syn + '_x.nii.gz,' + name_warp_syn + '_y.nii.gz')
    sct.run('sct_maths -i ' + name_warp_syn + '_inverse.nii.gz -w -mcs -o ' + name_warp_syn + '_x_inverse.nii.gz,' + name_warp_syn + '_y_inverse.nii.gz')
    data_warp_x = load(name_warp_syn + '_x.nii.gz').get_data()
    data_warp_y = load(name_warp_syn + '_y.nii.gz').get_data()
    hdr_warp = load(name_warp_syn + '_x.nii.gz').get_header()
    data_warp_x_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_data()
    data_warp_y_inverse = load(name_warp_syn + '_y_inverse.nii.gz').get_data()
    hdr_warp_inverse = load(name_warp_syn + '_x_inverse.nii.gz').get_header()
    # Outliers deletion: flag aberrant displacement values along z (last axis),
    # independently for every (x, y) pixel column.
    print'\n\tDeleting outliers...'
    mask_x_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x)
    mask_y_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y)
    mask_x_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_x_inverse)
    mask_y_inverse_a = apply_along_axis(lambda m: outliers_detection(m, type='median', factor=factor, return_filtered_signal='no', verbose=0), axis=-1, arr=data_warp_y_inverse)
    # Outliers replacement by linear interpolation using closest non-outlier points
    data_warp_x_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_a)
    data_warp_y_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_a)
    data_warp_x_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_x_inverse_a)
    data_warp_y_inverse_no_outliers = apply_along_axis(lambda m: outliers_completion(m, verbose=0), axis=-1, arr=mask_y_inverse_a)
    # Smoothing of results along z with a hanning moving-average window.
    print'\n\tSmoothing results...'
    data_warp_x_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_no_outliers)
    data_warp_x_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_x_inverse_no_outliers)
    data_warp_y_smooth = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_no_outliers)
    data_warp_y_smooth_inverse = apply_along_axis(lambda m: smoothing_window(m, window_len=int(window_length), window='hanning', verbose=0), axis=-1, arr=data_warp_y_inverse_no_outliers)
    print'\nSaving regularized warping fields...'
    # Get image dimensions of destination image
    nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
    # Reassemble the smoothed x/y components into 5D (x, y, z, t, vector) arrays;
    # the third vector component (z displacement) is left at zero.
    data_warp_smooth = zeros(((((nx, ny, nz, 1, 3)))))
    data_warp_smooth[:,:,:,0,0] = data_warp_x_smooth
    data_warp_smooth[:,:,:,0,1] = data_warp_y_smooth
    data_warp_smooth_inverse = zeros(((((nx, ny, nz, 1, 3)))))
    data_warp_smooth_inverse[:,:,:,0,0] = data_warp_x_smooth_inverse
    data_warp_smooth_inverse[:,:,:,0,1] = data_warp_y_smooth_inverse
    # Force header's parameter to intent so that the file may be recognised as a warping field by ants
    hdr_warp.set_intent('vector', (), '')
    hdr_warp_inverse.set_intent('vector', (), '')
    img = Nifti1Image(data_warp_smooth, None, header=hdr_warp)
    img_inverse = Nifti1Image(data_warp_smooth_inverse, None, header=hdr_warp_inverse)
    save(img, filename=warp_forward_out)
    print'\tFile ' + warp_forward_out + ' saved.'
    save(img_inverse, filename=warp_inverse_out)
    print'\tFile ' + warp_inverse_out + ' saved.'
|
mit
| 6,486,023,166,097,411,000
| 72.807851
| 220
| 0.683229
| false
| 3.341721
| false
| false
| false
|
divyamamgai/UdacityProjectMovieTrailerWebsite
|
media.py
|
1
|
3327
|
import webbrowser
import re
import urllib.request
import urllib.parse
import json
# as keyword creates an alias for xml.etree.ElementTree as ET, so we can use ET to reference xml.etree.ElementTree.
import xml.etree.ElementTree as eT
class Movie:
    """A movie with its plot, poster URL, and trailer URL.

    Instances can be built directly or fetched from the OMDb and Trailer
    Addict web APIs via :meth:`initialize_from_title`.
    """

    def __init__(self, title, plot, poster, trailer_url):
        self.title = title
        self.plot = plot
        self.poster_url = poster
        self.trailer_url = trailer_url

    def show_trailer(self):
        """Open the Web Browser with the trailer URL of the movie."""
        webbrowser.open(self.trailer_url)

    @staticmethod
    def generate_traileraddict_id(title):
        """Convert a movie title into a Trailer Addict URL slug.

        Firstly strip the title to remove excess white spaces surrounding the text.
        Secondly remove all the non-alphabet and non-numeric characters from the title.
        Thirdly convert the result to lowercase and convert all the white spaces
        (even the groups) to dash.
        """
        return re.sub(r"(\s|-)+", "-", (re.sub(r"[^a-zA-Z0-9\s\-]", "", title.strip())).lower())

    @staticmethod
    def initialize_from_title(title, traileraddict_trailer_type="trailer"):
        """Build a Movie from its title using the OMDb and Trailer Addict APIs.

        Returns a Movie instance, or None when OMDb does not know the title.
        traileraddict_trailer_type selects the Trailer Addict clip kind
        (e.g. "trailer").
        """
        print("Requesting information for the movie '" + title + "' from omdb...")
        # Make API request to omdb to get movie information. The 'with' block
        # guarantees the HTTP connection is closed even if read() raises.
        omdb_url = "http://www.omdbapi.com/?t=" + urllib.parse.quote(title)
        with urllib.request.urlopen(omdb_url) as omdb_api_connection:
            omdb_api_response = omdb_api_connection.read()
        # HTTPResponse.read() returns raw bytes; decode as UTF-8 before parsing.
        omdb_movie_data = json.loads(omdb_api_response.decode("utf-8"))
        # OMDb reports lookup success as the string "True"/"False".
        if omdb_movie_data["Response"] == "True":
            print("Movie information found successfully!")
            print("Requesting trailer for the movie '" + title + "' from Trailer Addict...")
            # Make API request to Trailer Addict to get movie trailer.
            traileraddict_url = ("http://simpleapi.traileraddict.com/" +
                                 Movie.generate_traileraddict_id(title) +
                                 "/" + traileraddict_trailer_type)
            with urllib.request.urlopen(traileraddict_url) as traileraddict_api_connection:
                traileraddict_api_response = traileraddict_api_connection.read()
            # Parse XML returned as response from the Trailer Addict API.
            traileraddict_xml_root = eT.fromstring(traileraddict_api_response.decode("utf-8"))
            # The first child of the root is the trailer element; its
            # 'embed_standard' tag holds iframe embed HTML whose 'src'
            # attribute is the actual trailer URL.
            trailer_url = eT.fromstring(traileraddict_xml_root[0].find("embed_standard").text).attrib["src"]
            print("Movie trailer found successfully!")
            return Movie(title, omdb_movie_data["Plot"], omdb_movie_data["Poster"], trailer_url)
        else:
            print("Movie not found!")
            return None
|
mit
| 3,126,210,087,341,528,000
| 54.45
| 115
| 0.637211
| false
| 4.153558
| false
| false
| false
|
mattsmart/biomodels
|
transcriptome_clustering/spectrums.py
|
1
|
9188
|
import matplotlib.pyplot as plt
import numpy as np
import os
from inference import choose_J_from_general_form, infer_interactions, error_fn
from settings import FOLDER_OUTPUT
from visualize_matrix import plot_matrix
def get_spectrum_from_arr(J, real=True, sort=True):
    """Return the eigenvalue spectrum of the square array J.

    Eigenvectors are discarded. By default the (possibly complex) eigenvalues
    are reduced to their real parts and returned sorted ascending.
    """
    # TODO deal with massive complex part if necessary
    eigenvalues = np.linalg.eig(J)[0]
    if real:
        eigenvalues = np.real(eigenvalues)
    return np.sort(eigenvalues) if sort else eigenvalues
def get_spectrums(C, D, num_spectrums=10, method='U_data', print_errors=True):
"""
Returns J's (generated from method) and their spectrums and their labels
J's returned as list of arrays
Shape is num_spectrums X dim_spectrum
"""
assert method in ['U_data', 'U_lyap', 'infer_data', 'infer_lyap']
spectrums = np.zeros((num_spectrums, D.shape[0]))
list_of_J = [0]*num_spectrums
# generate spectrum labels
if method[0] == 'U':
scales = np.linspace(0, 0.009, num_spectrums)
labels = ['scale_%.3f' % i for i in scales]
else:
alphas = np.logspace(-10, -1, num_spectrums)
labels = ['alpha_%.2e' % a for a in alphas]
for idx in xrange(num_spectrums):
if method[0] == 'U':
J = choose_J_from_general_form(C, D, scale=scales[idx])
else:
J = infer_interactions(C, D, alpha=alphas[idx])
if print_errors:
err = error_fn(C, D, J)
print "Error in method %s, idx %d, is %.3f (alpha=%.2e)" % (method, idx, err, alphas[idx])
list_of_J[idx] = J
spectrums[idx, :] = get_spectrum_from_arr(J, real=True)
return list_of_J, spectrums, labels
def get_J_truncated_spectrum(J, idx):
    """
    Given an idx, removes row/col idx of J and computes the spectrum of the
    resulting (n-1)*(n-1) array.
    """
    # np.delete returns a fresh array, so J itself is never modified.
    truncated = np.delete(np.delete(J, idx, axis=0), idx, axis=1)
    return get_spectrum_from_arr(truncated, real=True)
def scan_J_truncations(J, verbose=False, spectrum_unperturbed=None):
"""
Given a Jacobian matrix J
(1) compute the spectrum
(2) assess if the spectrum is a suitable starting point
(3) iteratively delete all row/col pairs and compute spectrum of each
(4) for each row/col pair, report if the spectrum has been sufficiently perturbed
"""
assert J.shape[0] == J.shape[1]
n = J.shape[0]
if spectrum_unperturbed is None:
spectrum_unperturbed = get_spectrum_from_arr(J, real=True)
spectrums_perturbed = np.zeros((n, n-1))
if verbose:
print 'unperturbed', '\n', spectrum_unperturbed
for idx in xrange(n):
spectrum_idx = get_J_truncated_spectrum(J, idx)
spectrums_perturbed[idx, :] = spectrum_idx
if verbose:
print idx, '\n', spectrum_idx
return spectrum_unperturbed, spectrums_perturbed
def gene_control_scores(spectrum_unperturbed, spectrums_perturbed, fixed_denom=None, use_min=True):
    """Per-gene control score from spectrum shifts under row/col deletion.

    See Sid 2018 draft for idea. Row i of *spectrums_perturbed* is the spectrum
    with gene i removed; the score compares its extreme eigenvalue against the
    unperturbed spectrum's, normalized by *fixed_denom* (elementwise) or by the
    RMS of the unperturbed spectrum.
    """
    if use_min:
        numerator = np.min(spectrums_perturbed, axis=1) - np.min(spectrum_unperturbed)
    else:
        # subtraction order is swapped here so the index stays positive
        numerator = np.max(spectrum_unperturbed) - np.max(spectrums_perturbed, axis=1)
    if fixed_denom is None:
        # default normalization: root-mean-square of the unperturbed spectrum
        denominator = np.sqrt(np.mean(spectrum_unperturbed ** 2))
    else:
        assert fixed_denom.shape == spectrum_unperturbed.shape
        denominator = fixed_denom
    return numerator / denominator
def plot_spectrum_hists(spectrums, labels, method='U', hist='default', title_mod='', plotdir=FOLDER_OUTPUT, show=False):
    """Overlay one histogram (or violin) per row of *spectrums* on a single figure.

    spectrums: num_spectrums x dim array of real eigenvalues, one spectrum per row.
    labels: one legend/tick label per spectrum.
    hist: 'default' for overlaid histograms, 'violin' for violin plots.
    The figure is saved as a PNG under *plotdir* and shown when show=True.
    """
    # TODO fix x axis range -6 6
    # TODO remove method from title since not used
    def set_axis_style(ax, labels):
        # Violin-plot helper: one named tick per sample.
        ax.get_xaxis().set_tick_params(direction='out')
        ax.xaxis.set_ticks_position('bottom')
        ax.set_xticks(np.arange(1, len(labels) + 1))
        ax.set_xticklabels(labels)
        ax.set_xlim(0.25, len(labels) + 0.75)
        ax.set_xlabel('Sample name')
    f = plt.figure(figsize=(10, 6))
    if hist == 'default':
        # plot first spectrum to get bins; the same bins are reused for the rest
        _, bins, _ = plt.hist(spectrums[0, :], bins=10, range=[-6, 6], alpha=0.5, normed=True, label=labels[0])
        for idx in xrange(1, len(labels)):
            _ = plt.hist(spectrums[idx, :], bins=bins, alpha=0.5, normed=True, label=labels[idx])
        plt.xlabel('Re(lambda)')
        plt.ylabel('Spectrums')
    elif hist == 'violin':
        # NOTE(review): the message says 'not yet implemented', but a violin plot
        # is drawn right below -- the warning looks stale. TODO confirm and drop.
        print 'hist type %s not yet implemented in plot_spectrum_hists(...)' % hist
        plt.violinplot(spectrums.T, showmeans=False, showmedians=True)
        set_axis_style(plt.gca(), labels)
        plt.ylabel('Re(lambda)')
    else:
        print 'hist type %s not supported in plot_spectrum_hists(...)' % hist
        assert 1==2  # fail loudly on an unknown hist type
    plt.title('Spectrums from %s %s' % (method, title_mod))
    plt.legend()
    plt.savefig(plotdir + os.sep + 'spectrum_hist_%s_%s_%s.png' % (hist, method, title_mod))
    if show:
        plt.show()
    return
def plot_rank_order_spectrum(spectrum, method='U', title_mod='', plotdir=FOLDER_OUTPUT, show=False):
    """Bar chart of the eigenvalues of *spectrum*, ranked largest to smallest.

    Saves the figure as a PDF under *plotdir* and displays it when show=True.
    """
    fig = plt.figure(figsize=(10, 6))
    # rank eigenvalues high-to-low for the bar chart
    eigenvalues_high_to_low = np.sort(spectrum)[::-1]
    plt.bar(range(len(eigenvalues_high_to_low)), eigenvalues_high_to_low)
    plt.axhline(0.0, linewidth=1.0, color='k')
    plt.ylabel('Re(lambda)')
    plt.xlabel('Eigenvalue ranking')
    plt.title('Spectrum from %s %s' % (method, title_mod))
    plt.savefig(plotdir + os.sep + 'spectrum_ranking_%s_%s.pdf' % (method, title_mod))
    if show:
        plt.show()
    return
def plot_spectrum_extremes(spectrum_unperturbed, spectrums_perturbed, method='U', title_mod='', plotdir=FOLDER_OUTPUT, show=False, max=True):
    """Bar plot of the extreme (max or min) eigenvalue after each row/col deletion.

    spectrum_unperturbed: length-n spectrum of the full Jacobian (drawn as a green
        reference line).
    spectrums_perturbed: n x (n-1) array; row i is the spectrum with row/col i deleted.
    max: True plots the largest eigenvalue per deletion, False the smallest.
    NOTE(review): the parameter name 'max' shadows the builtin max() inside this body.
    """
    n = len(spectrum_unperturbed)
    bar_width = 0.45
    plt.close('all')
    f = plt.figure(figsize=(10, 6))
    ax = plt.gca()
    if max:
        spectrum_unperturbed_max = np.max(spectrum_unperturbed)
        spectrums_perturbed_maxes = np.max(spectrums_perturbed, axis=1)
        plt.bar(np.arange(n), spectrums_perturbed_maxes, bar_width)
        # green line marks the value with no deletion
        plt.axhline(spectrum_unperturbed_max, linewidth=1.0, color='g')
        #plt.ylim(np.min(spectrums_perturbed_maxes) * 1.05, np.max(spectrums_perturbed_maxes) * 1.05)
        plt.ylabel('Max Re(lambda)')
        plt.title('Largest eigenvalue after row/col deletion (green = no deletion) from %s %s' % (method, title_mod))
        figpath = plotdir + os.sep + 'spectrum_perturbed_max_%s_%s.pdf' % (method, title_mod)
    else:
        spectrum_unperturbed_min = np.min(spectrum_unperturbed)
        spectrums_perturbed_mins = np.min(spectrums_perturbed, axis=1)
        ax.bar(np.arange(n), spectrums_perturbed_mins, bar_width)
        plt.axhline(spectrum_unperturbed_min, linewidth=1.0, color='g')
        #plt.ylim(np.min(spectrums_perturbed_mins) * 1.05, np.max(spectrums_perturbed_mins) * 1.05)
        plt.ylabel('Min Re(lambda)')
        plt.title('Lowest eigenvalue after row/col deletion (green = no deletion) from %s %s' % (method, title_mod))
        figpath = plotdir + os.sep + 'spectrum_perturbed_min_%s_%s.pdf' % (method, title_mod)
    plt.axhline(0.0, linewidth=1.0, color='k')  # zero line for reference
    ax.set_xticks(np.arange(n))
    plt.xlabel('Index of deleted row/col')
    plt.savefig(figpath)
    if show:
        plt.show()
    return
def plot_sliding_tau_scores(tau_range, gene_score_arr, gene_score_label, score_type, plotdir=FOLDER_OUTPUT, show=False):
    """Plot per-gene scores as tau sweeps toward the bifurcation at tau=2.0.

    tau_range: 1D array of tau values (x axis).
    gene_score_arr: len(tau_range) x num_genes array; one curve per gene.
    gene_score_label, score_type: strings used in the title and output filename.
    The top-2 and bottom-2 genes (ranked by summed score over tau) are highlighted.
    """
    assert gene_score_arr.shape[0] == tau_range.shape[0]
    plt.close('all')
    f = plt.figure(figsize=(12, 7))
    # all gene curves in faint black
    plt.plot(tau_range, gene_score_arr, '--ok', alpha=0.3)
    # add vertical line at tau = 2.0 bifurcation
    plt.axvline(2.0, linewidth=1.0, color='k', alpha=0.7)
    # highlight top k curves (and, mirrored below, the bottom k)
    top_k = 2
    sorted_top_curves = np.argsort(np.sum(gene_score_arr, axis=0))[::-1]  # genes ranked high-to-low
    for rank, k in enumerate(sorted_top_curves[0:top_k]):
        plt.plot(tau_range, gene_score_arr[:, k], '--o', alpha=0.7, label='rank%d = gene %d' % (rank, k))
    for rank, k in enumerate(sorted_top_curves[-top_k:][::-1]):
        plt.plot(tau_range, gene_score_arr[:, k], '--x', alpha=0.7, label='rank%d = gene %d' %
                 (gene_score_arr.shape[1] - 1 - rank, k))
    plt.legend()
    plt.xlabel('tau')
    plt.ylabel('%s index' % score_type)
    plt.title('%s index from %s over all genes, approaching bifurcation (tau=2.0)' % (score_type, gene_score_label))
    figpath = plotdir + os.sep + 'score_%s_%s.pdf' % (gene_score_label, score_type)
    plt.savefig(figpath)
    if show:
        plt.show()
    return
if __name__ == '__main__':
    # Quick visual smoke test with synthetic data: 10 fake spectrums of 500
    # eigenvalues each drawn from N(0, 2), rendered both as overlaid histograms
    # and as violin plots.
    num_spectrum = 10
    fake_spectrums = np.random.normal(0.0, 2.0, (num_spectrum, 500))
    fake_labels = [str(a) for a in range(num_spectrum)]
    plot_spectrum_hists(fake_spectrums, fake_labels, hist='default', title_mod='(fake_main)', show=True)
    plot_spectrum_hists(fake_spectrums, fake_labels, hist='violin', title_mod='(fake_main)', show=True)
|
mit
| 5,367,572,452,356,783,000
| 41.734884
| 141
| 0.626034
| false
| 3.083221
| false
| false
| false
|
johnnoone/aioconsul
|
tests/test_common.py
|
1
|
1306
|
import pytest
from aioconsul.common import Address, parse_addr
from aioconsul.common import duration_to_timedelta, timedelta_to_duration
from datetime import timedelta
@pytest.mark.parametrize("a, b", [
    ("10s", timedelta(seconds=10)),
    ("2m", timedelta(minutes=2)),
    ("2h", timedelta(hours=2)),
    ("2d", timedelta(days=2)),
])
def test_duration(a, b):
    """Duration strings and timedeltas must round-trip in both directions."""
    assert duration_to_timedelta(a) == b
    assert a == timedelta_to_duration(b)
# parameter renamed from ``input`` to avoid shadowing the builtin
@pytest.mark.parametrize("value, expected", [
    ("localhost",
     Address(proto=None, host="localhost", port=None)),
    ("http://localhost",
     Address(proto="http", host="localhost", port=None)),
    ("udp://localhost",
     Address(proto="udp", host="localhost", port=None)),
    ("tcp://localhost",
     Address(proto="tcp", host="localhost", port=None)),
    ("unix://localhost",
     Address(proto="unix", host="localhost", port=None)),
    (("localhost", 8080),
     Address(proto=None, host="localhost", port=8080)),
    (8080,
     Address(proto=None, host=None, port=8080)),
    ("127.0.0.1:8080",
     Address(proto=None, host="127.0.0.1", port=8080)),
    (Address(proto=None, host="localhost", port=None),
     Address(proto=None, host="localhost", port=None)),
])
def test_addr(value, expected):
    """parse_addr must normalise strings, (host, port) tuples, bare ports and
    Address instances into an Address."""
    assert parse_addr(value) == expected
|
bsd-3-clause
| -8,695,781,263,740,024,000
| 32.487179
| 73
| 0.644717
| false
| 3.44591
| false
| false
| false
|
nasa/39A
|
spaceapps/awards/models.py
|
1
|
1227
|
from django.db import models
from projects.models import Project
from locations.models import Location
class LocalAward(models.Model):
    """An award given to a project at a specific event location."""
    project = models.ForeignKey(Project)
    location = models.ForeignKey(Location)
    title = models.CharField(max_length=100, blank=True)

    def is_eligible(self):
        """Return True when the project has published its source code URL."""
        # bool() keeps the original True/False return while replacing the
        # verbose if/else-return-literal form
        return bool(self.project.source_url)
class Nomination(models.Model):
    """A nomination of a project for an award at a given location."""
    project = models.ForeignKey(Project)
    location = models.ForeignKey(Location)
class GlobalAwardClass(models.Model):
    """A category of global award; titles are unique across categories."""
    title = models.CharField(max_length=150, unique=True, blank=True)

    class Meta:
        verbose_name = 'Global Award Class'
        verbose_name_plural = 'Global Award Classes'

    def __unicode__(self):
        # shown wherever the object is rendered as text (e.g. the admin)
        return self.title
class GlobalAwardFinalist(models.Model):
    """A project shortlisted for a global award category."""
    global_award_class = models.ForeignKey('GlobalAwardClass')
    project = models.ForeignKey(Project)
    # True for the winner of the category
    best_in_class = models.BooleanField(default=False)

    class Meta:
        verbose_name = 'Global Award Finalist'
        verbose_name_plural = 'Global Award Finalists'

    def __unicode__(self):
        return '%s (%s)' % (self.project.title, self.global_award_class.title)
|
apache-2.0
| 4,471,971,229,118,569,000
| 30.487179
| 78
| 0.688672
| false
| 3.907643
| false
| false
| false
|
commaai/openpilot
|
selfdrive/car/honda/carstate.py
|
1
|
17654
|
from cereal import car
from collections import defaultdict
from common.numpy_fast import interp
from opendbc.can.can_define import CANDefine
from opendbc.can.parser import CANParser
from selfdrive.config import Conversions as CV
from selfdrive.car.interfaces import CarStateBase
from selfdrive.car.honda.values import CAR, DBC, STEER_THRESHOLD, SPEED_FACTOR, HONDA_BOSCH, HONDA_BOSCH_ALT_BRAKE_SIGNAL
TransmissionType = car.CarParams.TransmissionType
def calc_cruise_offset(offset, speed):
  """Heuristic offset so speed is controlled to ~0.3 m/s below pid_speed.

  Coefficients were solved from these constraints:
  - speed = 0 m/s, out = -0.3
  - speed = 34 m/s, offset = 20, out = -0.25
  - speed = 34 m/s, offset = -2.5, out = -1.8
  """
  k0, k1, k2 = -0.3, -0.01879, 0.01013
  uncapped = k0 + k1 * speed + k2 * speed * offset
  # never command a positive offset
  return min(uncapped, 0.)
def get_can_signals(CP, gearbox_msg="GEARBOX"):
  """Build the (signals, checks) lists for the powertrain CAN parser.

  Each signals entry is (signal_name, message_name, default_value) and each
  checks entry is (message_name, expected_frequency_hz).  Contents vary by
  platform (CP.carFingerprint), transmission and longitudinal-control setup.
  """
  # this function generates lists for signal, messages and initial values
  signals = [
    ("XMISSION_SPEED", "ENGINE_DATA", 0),
    ("WHEEL_SPEED_FL", "WHEEL_SPEEDS", 0),
    ("WHEEL_SPEED_FR", "WHEEL_SPEEDS", 0),
    ("WHEEL_SPEED_RL", "WHEEL_SPEEDS", 0),
    ("WHEEL_SPEED_RR", "WHEEL_SPEEDS", 0),
    ("STEER_ANGLE", "STEERING_SENSORS", 0),
    ("STEER_ANGLE_RATE", "STEERING_SENSORS", 0),
    ("MOTOR_TORQUE", "STEER_MOTOR_TORQUE", 0),
    ("STEER_TORQUE_SENSOR", "STEER_STATUS", 0),
    ("LEFT_BLINKER", "SCM_FEEDBACK", 0),
    ("RIGHT_BLINKER", "SCM_FEEDBACK", 0),
    ("GEAR", gearbox_msg, 0),
    ("SEATBELT_DRIVER_LAMP", "SEATBELT_STATUS", 1),
    ("SEATBELT_DRIVER_LATCHED", "SEATBELT_STATUS", 0),
    ("BRAKE_PRESSED", "POWERTRAIN_DATA", 0),
    ("BRAKE_SWITCH", "POWERTRAIN_DATA", 0),
    ("CRUISE_BUTTONS", "SCM_BUTTONS", 0),
    ("ESP_DISABLED", "VSA_STATUS", 1),
    ("USER_BRAKE", "VSA_STATUS", 0),
    ("BRAKE_HOLD_ACTIVE", "VSA_STATUS", 0),
    ("STEER_STATUS", "STEER_STATUS", 5),
    ("GEAR_SHIFTER", gearbox_msg, 0),
    ("PEDAL_GAS", "POWERTRAIN_DATA", 0),
    ("CRUISE_SETTING", "SCM_BUTTONS", 0),
    ("ACC_STATUS", "POWERTRAIN_DATA", 0),
  ]
  checks = [
    ("ENGINE_DATA", 100),
    ("WHEEL_SPEEDS", 50),
    ("STEERING_SENSORS", 100),
    ("SEATBELT_STATUS", 10),
    ("CRUISE", 10),
    ("POWERTRAIN_DATA", 100),
    ("VSA_STATUS", 50),
    ("STEER_STATUS", 100),
    ("STEER_MOTOR_TORQUE", 0), # TODO: not on every car
  ]
  # SCM message rates differ on the China-market Odyssey
  if CP.carFingerprint == CAR.ODYSSEY_CHN:
    checks += [
      ("SCM_FEEDBACK", 25),
      ("SCM_BUTTONS", 50),
    ]
  else:
    checks += [
      ("SCM_FEEDBACK", 10),
      ("SCM_BUTTONS", 25),
    ]
  if CP.carFingerprint in (CAR.CRV_HYBRID, CAR.CIVIC_BOSCH_DIESEL, CAR.ACURA_RDX_3G):
    checks += [
      (gearbox_msg, 50),
    ]
  else:
    checks += [
      (gearbox_msg, 100),
    ]
  if CP.carFingerprint in HONDA_BOSCH_ALT_BRAKE_SIGNAL:
    signals += [("BRAKE_PRESSED", "BRAKE_MODULE", 0)]
    checks += [("BRAKE_MODULE", 50)]
  if CP.carFingerprint in HONDA_BOSCH:
    signals += [
      ("CAR_GAS", "GAS_PEDAL_2", 0),
      ("MAIN_ON", "SCM_FEEDBACK", 0),
      ("EPB_STATE", "EPB_STATUS", 0),
    ]
    checks += [
      ("EPB_STATUS", 50),
      ("GAS_PEDAL_2", 100),
    ]
    # when stock ACC handles longitudinal, read its HUD/control messages back
    if not CP.openpilotLongitudinalControl:
      signals += [
        ("CRUISE_CONTROL_LABEL", "ACC_HUD", 0),
        ("CRUISE_SPEED", "ACC_HUD", 0),
        ("ACCEL_COMMAND", "ACC_CONTROL", 0),
        ("AEB_STATUS", "ACC_CONTROL", 0),
      ]
      checks += [
        ("ACC_HUD", 10),
        ("ACC_CONTROL", 50),
      ]
  else: # Nidec signals
    signals += [("CRUISE_SPEED_PCM", "CRUISE", 0),
                ("CRUISE_SPEED_OFFSET", "CRUISE_PARAMS", 0)]
    if CP.carFingerprint == CAR.ODYSSEY_CHN:
      checks += [("CRUISE_PARAMS", 10)]
    else:
      checks += [("CRUISE_PARAMS", 50)]
  # door-open / wheels-moving signals live on different messages per platform
  if CP.carFingerprint in (CAR.ACCORD, CAR.ACCORDH, CAR.CIVIC_BOSCH, CAR.CIVIC_BOSCH_DIESEL, CAR.CRV_HYBRID, CAR.INSIGHT, CAR.ACURA_RDX_3G):
    signals += [("DRIVERS_DOOR_OPEN", "SCM_FEEDBACK", 1)]
  elif CP.carFingerprint == CAR.ODYSSEY_CHN:
    signals += [("DRIVERS_DOOR_OPEN", "SCM_BUTTONS", 1)]
  elif CP.carFingerprint == CAR.HRV:
    signals += [("DRIVERS_DOOR_OPEN", "SCM_BUTTONS", 1),
                ("WHEELS_MOVING", "STANDSTILL", 1)]
  else:
    signals += [("DOOR_OPEN_FL", "DOORS_STATUS", 1),
                ("DOOR_OPEN_FR", "DOORS_STATUS", 1),
                ("DOOR_OPEN_RL", "DOORS_STATUS", 1),
                ("DOOR_OPEN_RR", "DOORS_STATUS", 1),
                ("WHEELS_MOVING", "STANDSTILL", 1)]
    checks += [
      ("DOORS_STATUS", 3),
      ("STANDSTILL", 50),
    ]
  # per-platform extras: main-on source, gas pedal message, parking brake, units
  if CP.carFingerprint == CAR.CIVIC:
    signals += [("CAR_GAS", "GAS_PEDAL_2", 0),
                ("MAIN_ON", "SCM_FEEDBACK", 0),
                ("IMPERIAL_UNIT", "HUD_SETTING", 0),
                ("EPB_STATE", "EPB_STATUS", 0)]
    checks += [
      ("HUD_SETTING", 50),
      ("EPB_STATUS", 50),
      ("GAS_PEDAL_2", 100),
    ]
  elif CP.carFingerprint == CAR.ACURA_ILX:
    signals += [("CAR_GAS", "GAS_PEDAL_2", 0),
                ("MAIN_ON", "SCM_BUTTONS", 0)]
    checks += [
      ("GAS_PEDAL_2", 100),
    ]
  elif CP.carFingerprint in (CAR.CRV, CAR.CRV_EU, CAR.ACURA_RDX, CAR.PILOT_2019, CAR.RIDGELINE):
    signals += [("MAIN_ON", "SCM_BUTTONS", 0)]
  elif CP.carFingerprint == CAR.FIT:
    signals += [("CAR_GAS", "GAS_PEDAL_2", 0),
                ("MAIN_ON", "SCM_BUTTONS", 0),
                ("BRAKE_HOLD_ACTIVE", "VSA_STATUS", 0)]
    checks += [
      ("GAS_PEDAL_2", 100),
    ]
  elif CP.carFingerprint == CAR.HRV:
    signals += [("CAR_GAS", "GAS_PEDAL", 0),
                ("MAIN_ON", "SCM_BUTTONS", 0),
                ("BRAKE_HOLD_ACTIVE", "VSA_STATUS", 0)]
    checks += [
      ("GAS_PEDAL", 100),
    ]
  elif CP.carFingerprint == CAR.ODYSSEY:
    signals += [("MAIN_ON", "SCM_FEEDBACK", 0),
                ("EPB_STATE", "EPB_STATUS", 0)]
    checks += [("EPB_STATUS", 50)]
  elif CP.carFingerprint == CAR.PILOT:
    signals += [("MAIN_ON", "SCM_BUTTONS", 0),
                ("CAR_GAS", "GAS_PEDAL_2", 0)]
    checks += [
      ("GAS_PEDAL_2", 0), # TODO: fix this freq, seems this signal isn't present at all on some models
    ]
  elif CP.carFingerprint == CAR.ODYSSEY_CHN:
    signals += [("MAIN_ON", "SCM_BUTTONS", 0),
                ("EPB_STATE", "EPB_STATUS", 0)]
    checks += [("EPB_STATUS", 50)]
  # add gas interceptor reading if we are using it
  if CP.enableGasInterceptor:
    signals.append(("INTERCEPTOR_GAS", "GAS_SENSOR", 0))
    signals.append(("INTERCEPTOR_GAS2", "GAS_SENSOR", 0))
    checks.append(("GAS_SENSOR", 50))
  if CP.openpilotLongitudinalControl:
    signals += [
      ("BRAKE_ERROR_1", "STANDSTILL", 1),
      ("BRAKE_ERROR_2", "STANDSTILL", 1)
    ]
    checks += [("STANDSTILL", 50)]
  return signals, checks
class CarState(CarStateBase):
  """Parses raw Honda CAN (powertrain, camera and body buses) into a cereal
  car.CarState message once per update cycle."""

  def __init__(self, CP):
    super().__init__(CP)
    can_define = CANDefine(DBC[CP.carFingerprint]["pt"])

    # the 1.5T CVT Accord reports gear state on a dedicated message
    self.gearbox_msg = "GEARBOX"
    if CP.carFingerprint == CAR.ACCORD and CP.transmissionType == TransmissionType.cvt:
      self.gearbox_msg = "GEARBOX_15T"

    self.shifter_values = can_define.dv[self.gearbox_msg]["GEAR_SHIFTER"]
    # unrecognized raw steer status values map to "UNKNOWN"
    self.steer_status_values = defaultdict(lambda: "UNKNOWN", can_define.dv["STEER_STATUS"]["STEER_STATUS"])

    self.user_gas, self.user_gas_pressed = 0., 0
    # previous brake-switch value and its CAN timestamp, used to debounce noise
    self.brake_switch_prev = 0
    self.brake_switch_prev_ts = 0
    self.cruise_setting = 0
    self.v_cruise_pcm_prev = 0

  def update(self, cp, cp_cam, cp_body):
    """Read one cycle of parsed CAN values and return a populated CarState.

    cp: powertrain-bus parser; cp_cam: camera-bus parser; cp_body: B-CAN parser.
    Must be called exactly once per loop (it rotates the prev_* fields).
    """
    ret = car.CarState.new_message()

    # car params
    v_weight_v = [0., 1.]  # don't trust smooth speed at low values to avoid premature zero snapping
    v_weight_bp = [1., 6.]  # smooth blending, below ~0.6m/s the smooth speed snaps to zero

    # update prevs, update must run once per loop
    self.prev_cruise_buttons = self.cruise_buttons
    self.prev_cruise_setting = self.cruise_setting

    # ******************* parse out can *******************
    # TODO: find wheels moving bit in dbc
    if self.CP.carFingerprint in (CAR.ACCORD, CAR.ACCORDH, CAR.CIVIC_BOSCH, CAR.CIVIC_BOSCH_DIESEL, CAR.CRV_HYBRID, CAR.INSIGHT, CAR.ACURA_RDX_3G):
      ret.standstill = cp.vl["ENGINE_DATA"]["XMISSION_SPEED"] < 0.1
      ret.doorOpen = bool(cp.vl["SCM_FEEDBACK"]["DRIVERS_DOOR_OPEN"])
    elif self.CP.carFingerprint == CAR.ODYSSEY_CHN:
      ret.standstill = cp.vl["ENGINE_DATA"]["XMISSION_SPEED"] < 0.1
      ret.doorOpen = bool(cp.vl["SCM_BUTTONS"]["DRIVERS_DOOR_OPEN"])
    elif self.CP.carFingerprint == CAR.HRV:
      # NOTE(review): HRV never sets ret.standstill here even though its
      # signals include WHEELS_MOVING -- confirm whether this is intentional
      ret.doorOpen = bool(cp.vl["SCM_BUTTONS"]["DRIVERS_DOOR_OPEN"])
    else:
      ret.standstill = not cp.vl["STANDSTILL"]["WHEELS_MOVING"]
      ret.doorOpen = any([cp.vl["DOORS_STATUS"]["DOOR_OPEN_FL"], cp.vl["DOORS_STATUS"]["DOOR_OPEN_FR"],
                          cp.vl["DOORS_STATUS"]["DOOR_OPEN_RL"], cp.vl["DOORS_STATUS"]["DOOR_OPEN_RR"]])
    ret.seatbeltUnlatched = bool(cp.vl["SEATBELT_STATUS"]["SEATBELT_DRIVER_LAMP"] or not cp.vl["SEATBELT_STATUS"]["SEATBELT_DRIVER_LATCHED"])

    steer_status = self.steer_status_values[cp.vl["STEER_STATUS"]["STEER_STATUS"]]
    ret.steerError = steer_status not in ["NORMAL", "NO_TORQUE_ALERT_1", "NO_TORQUE_ALERT_2", "LOW_SPEED_LOCKOUT", "TMP_FAULT"]
    # NO_TORQUE_ALERT_2 can be caused by bump OR steering nudge from driver
    self.steer_not_allowed = steer_status not in ["NORMAL", "NO_TORQUE_ALERT_2"]
    # LOW_SPEED_LOCKOUT is not worth a warning
    ret.steerWarning = steer_status not in ["NORMAL", "LOW_SPEED_LOCKOUT", "NO_TORQUE_ALERT_2"]

    if not self.CP.openpilotLongitudinalControl:
      self.brake_error = 0
    else:
      self.brake_error = cp.vl["STANDSTILL"]["BRAKE_ERROR_1"] or cp.vl["STANDSTILL"]["BRAKE_ERROR_2"]
    ret.espDisabled = cp.vl["VSA_STATUS"]["ESP_DISABLED"] != 0

    # wheel speeds arrive in kph; apply platform-specific correction factor
    speed_factor = SPEED_FACTOR[self.CP.carFingerprint]
    ret.wheelSpeeds.fl = cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_FL"] * CV.KPH_TO_MS * speed_factor
    ret.wheelSpeeds.fr = cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_FR"] * CV.KPH_TO_MS * speed_factor
    ret.wheelSpeeds.rl = cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_RL"] * CV.KPH_TO_MS * speed_factor
    ret.wheelSpeeds.rr = cp.vl["WHEEL_SPEEDS"]["WHEEL_SPEED_RR"] * CV.KPH_TO_MS * speed_factor
    v_wheel = (ret.wheelSpeeds.fl + ret.wheelSpeeds.fr + ret.wheelSpeeds.rl + ret.wheelSpeeds.rr)/4.

    # blend in transmission speed at low speed, since it has more low speed accuracy
    v_weight = interp(v_wheel, v_weight_bp, v_weight_v)
    ret.vEgoRaw = (1. - v_weight) * cp.vl["ENGINE_DATA"]["XMISSION_SPEED"] * CV.KPH_TO_MS * speed_factor + v_weight * v_wheel
    ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)

    ret.steeringAngleDeg = cp.vl["STEERING_SENSORS"]["STEER_ANGLE"]
    ret.steeringRateDeg = cp.vl["STEERING_SENSORS"]["STEER_ANGLE_RATE"]

    self.cruise_setting = cp.vl["SCM_BUTTONS"]["CRUISE_SETTING"]
    self.cruise_buttons = cp.vl["SCM_BUTTONS"]["CRUISE_BUTTONS"]

    ret.leftBlinker, ret.rightBlinker = self.update_blinker_from_stalk(
      250, cp.vl["SCM_FEEDBACK"]["LEFT_BLINKER"], cp.vl["SCM_FEEDBACK"]["RIGHT_BLINKER"])
    self.brake_hold = cp.vl["VSA_STATUS"]["BRAKE_HOLD_ACTIVE"]

    # electronic park brake and cruise main switch locations vary by platform
    if self.CP.carFingerprint in (CAR.CIVIC, CAR.ODYSSEY, CAR.CRV_5G, CAR.ACCORD, CAR.ACCORDH, CAR.CIVIC_BOSCH,
                                  CAR.CIVIC_BOSCH_DIESEL, CAR.CRV_HYBRID, CAR.INSIGHT, CAR.ACURA_RDX_3G):
      self.park_brake = cp.vl["EPB_STATUS"]["EPB_STATE"] != 0
      main_on = cp.vl["SCM_FEEDBACK"]["MAIN_ON"]
    elif self.CP.carFingerprint == CAR.ODYSSEY_CHN:
      self.park_brake = cp.vl["EPB_STATUS"]["EPB_STATE"] != 0
      main_on = cp.vl["SCM_BUTTONS"]["MAIN_ON"]
    else:
      self.park_brake = 0 # TODO
      main_on = cp.vl["SCM_BUTTONS"]["MAIN_ON"]

    gear = int(cp.vl[self.gearbox_msg]["GEAR_SHIFTER"])
    ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(gear, None))

    self.pedal_gas = cp.vl["POWERTRAIN_DATA"]["PEDAL_GAS"]
    # crv doesn't include cruise control
    if self.CP.carFingerprint in (CAR.CRV, CAR.CRV_EU, CAR.HRV, CAR.ODYSSEY, CAR.ACURA_RDX, CAR.RIDGELINE, CAR.PILOT_2019, CAR.ODYSSEY_CHN):
      ret.gas = self.pedal_gas / 256.
    else:
      ret.gas = cp.vl["GAS_PEDAL_2"]["CAR_GAS"] / 256.

    # this is a hack for the interceptor. This is now only used in the simulation
    # TODO: Replace tests by toyota so this can go away
    if self.CP.enableGasInterceptor:
      self.user_gas = (cp.vl["GAS_SENSOR"]["INTERCEPTOR_GAS"] + cp.vl["GAS_SENSOR"]["INTERCEPTOR_GAS2"]) / 2.
      self.user_gas_pressed = self.user_gas > 1e-5 # this works because interceptor read < 0 when pedal position is 0. Once calibrated, this will change
      ret.gasPressed = self.user_gas_pressed
    else:
      ret.gasPressed = self.pedal_gas > 1e-5

    ret.steeringTorque = cp.vl["STEER_STATUS"]["STEER_TORQUE_SENSOR"]
    ret.steeringTorqueEps = cp.vl["STEER_MOTOR_TORQUE"]["MOTOR_TORQUE"]
    ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD[self.CP.carFingerprint]

    if self.CP.carFingerprint in HONDA_BOSCH:
      if not self.CP.openpilotLongitudinalControl:
        ret.cruiseState.nonAdaptive = cp.vl["ACC_HUD"]["CRUISE_CONTROL_LABEL"] != 0
        ret.cruiseState.standstill = cp.vl["ACC_HUD"]["CRUISE_SPEED"] == 252.
        # On set, cruise set speed pulses between 254~255 and the set speed prev is set to avoid this.
        ret.cruiseState.speed = self.v_cruise_pcm_prev if cp.vl["ACC_HUD"]["CRUISE_SPEED"] > 160.0 else cp.vl["ACC_HUD"]["CRUISE_SPEED"] * CV.KPH_TO_MS
        self.v_cruise_pcm_prev = ret.cruiseState.speed
    else:
      ret.cruiseState.speedOffset = calc_cruise_offset(cp.vl["CRUISE_PARAMS"]["CRUISE_SPEED_OFFSET"], ret.vEgo)
      ret.cruiseState.speed = cp.vl["CRUISE"]["CRUISE_SPEED_PCM"] * CV.KPH_TO_MS

    self.brake_switch = cp.vl["POWERTRAIN_DATA"]["BRAKE_SWITCH"] != 0
    if self.CP.carFingerprint in HONDA_BOSCH_ALT_BRAKE_SIGNAL:
      ret.brakePressed = cp.vl["BRAKE_MODULE"]["BRAKE_PRESSED"] != 0
    else:
      # brake switch has shown some single time step noise, so only considered when
      # switch is on for at least 2 consecutive CAN samples
      # panda safety only checks BRAKE_PRESSED signal
      ret.brakePressed = bool(cp.vl["POWERTRAIN_DATA"]["BRAKE_PRESSED"] or
                              (self.brake_switch and self.brake_switch_prev and cp.ts["POWERTRAIN_DATA"]["BRAKE_SWITCH"] != self.brake_switch_prev_ts))
      self.brake_switch_prev = self.brake_switch
      self.brake_switch_prev_ts = cp.ts["POWERTRAIN_DATA"]["BRAKE_SWITCH"]

    ret.brake = cp.vl["VSA_STATUS"]["USER_BRAKE"]
    ret.cruiseState.enabled = cp.vl["POWERTRAIN_DATA"]["ACC_STATUS"] != 0
    ret.cruiseState.available = bool(main_on)

    # Gets rid of Pedal Grinding noise when brake is pressed at slow speeds for some models
    if self.CP.carFingerprint in (CAR.PILOT, CAR.PILOT_2019, CAR.RIDGELINE):
      if ret.brake > 0.05:
        ret.brakePressed = True

    # TODO: discover the CAN msg that has the imperial unit bit for all other cars
    # NOTE(review): ``in (CAR.CIVIC)`` is not a tuple -- if fingerprints are
    # strings this is a substring test; probably meant ``in (CAR.CIVIC,)``.
    self.is_metric = not cp.vl["HUD_SETTING"]["IMPERIAL_UNIT"] if self.CP.carFingerprint in (CAR.CIVIC) else False

    if self.CP.carFingerprint in HONDA_BOSCH:
      ret.stockAeb = (not self.CP.openpilotLongitudinalControl) and bool(cp.vl["ACC_CONTROL"]["AEB_STATUS"] and cp.vl["ACC_CONTROL"]["ACCEL_COMMAND"] < -1e-5)
    else:
      ret.stockAeb = bool(cp_cam.vl["BRAKE_COMMAND"]["AEB_REQ_1"] and cp_cam.vl["BRAKE_COMMAND"]["COMPUTER_BRAKE"] > 1e-5)

    if self.CP.carFingerprint in HONDA_BOSCH:
      self.stock_hud = False
      ret.stockFcw = False
    else:
      ret.stockFcw = cp_cam.vl["BRAKE_COMMAND"]["FCW"] != 0
      self.stock_hud = cp_cam.vl["ACC_HUD"]
      self.stock_brake = cp_cam.vl["BRAKE_COMMAND"]

    if self.CP.enableBsm and self.CP.carFingerprint in (CAR.CRV_5G, ):
      # BSM messages are on B-CAN, requires a panda forwarding B-CAN messages to CAN 0
      # more info here: https://github.com/commaai/openpilot/pull/1867
      ret.leftBlindspot = cp_body.vl["BSM_STATUS_LEFT"]["BSM_ALERT"] == 1
      ret.rightBlindspot = cp_body.vl["BSM_STATUS_RIGHT"]["BSM_ALERT"] == 1

    return ret

  def get_can_parser(self, CP):
    # powertrain bus: 1 on Bosch platforms, 0 on Nidec
    signals, checks = get_can_signals(CP, self.gearbox_msg)
    bus_pt = 1 if CP.carFingerprint in HONDA_BOSCH else 0
    return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, bus_pt)

  @staticmethod
  def get_cam_can_parser(CP):
    """Camera-bus (bus 2) parser for the stock camera's messages."""
    signals = []

    # all hondas except CRV, RDX and 2019 Odyssey@China use 0xe4 for steering
    checks = [(0xe4, 100)]
    if CP.carFingerprint in [CAR.CRV, CAR.CRV_EU, CAR.ACURA_RDX, CAR.ODYSSEY_CHN]:
      checks = [(0x194, 100)]

    if CP.carFingerprint not in HONDA_BOSCH:
      signals += [("COMPUTER_BRAKE", "BRAKE_COMMAND", 0),
                  ("AEB_REQ_1", "BRAKE_COMMAND", 0),
                  ("FCW", "BRAKE_COMMAND", 0),
                  ("CHIME", "BRAKE_COMMAND", 0),
                  ("FCM_OFF", "ACC_HUD", 0),
                  ("FCM_OFF_2", "ACC_HUD", 0),
                  ("FCM_PROBLEM", "ACC_HUD", 0),
                  ("ICONS", "ACC_HUD", 0)]
      checks += [
        ("ACC_HUD", 10),
        ("BRAKE_COMMAND", 50),
      ]

    return CANParser(DBC[CP.carFingerprint]["pt"], signals, checks, 2)

  @staticmethod
  def get_body_can_parser(CP):
    """B-CAN parser; only built for CR-V 5G blindspot monitoring, else None."""
    if CP.enableBsm and CP.carFingerprint == CAR.CRV_5G:
      signals = [("BSM_ALERT", "BSM_STATUS_RIGHT", 0),
                 ("BSM_ALERT", "BSM_STATUS_LEFT", 0)]
      checks = [
        ("BSM_STATUS_LEFT", 3),
        ("BSM_STATUS_RIGHT", 3),
      ]
      bus_body = 0 # B-CAN is forwarded to ACC-CAN radar side (CAN 0 on fake ethernet port)
      return CANParser(DBC[CP.carFingerprint]["body"], signals, checks, bus_body)
    return None
|
mit
| 1,605,306,792,609,832,000
| 41.4375
| 158
| 0.614648
| false
| 2.774477
| false
| false
| false
|
opinsys/aptirepo
|
aptirepo/setup.py
|
1
|
2037
|
# -*- coding: utf-8 -*-
# aptirepo - Simple APT Repository Tool
# Copyright (C) 2013,2014,2015 Opinsys
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from distutils.core import setup
import os.path
import re
import subprocess
# Extract the version of the newest ../debian/changelog entry.
# universal_newlines=True makes check_output return str on Python 3 (it
# returns bytes there by default, which re.search with a str pattern would
# reject with a TypeError); on Python 2 it is effectively a no-op, so the
# same line works for both interpreters this package advertises support for.
version = re.search('Version: (.*)', subprocess.check_output(
    ['dpkg-parsechangelog', '-l../debian/changelog'],
    universal_newlines=True)).group(1)

setup(name='aptirepo',
      version=version,
      description='Simple APT Repository Tool.',
      author='Tuomas Räsänen',
      author_email='tuomasjjrasanen@tjjr.fi',
      url='http://github.com/opinsys/aptirepo',
      scripts=['aptirepo'],
      package_dir={'aptirepo': 'lib'},
      packages=['aptirepo'],
      license='GPLv2+',
      platforms=['Linux'],
      classifiers=[
          "Development Status :: 1 - Planning",
          "Intended Audience :: Developers",
          "Intended Audience :: System Administrators",
          "License :: OSI Approved :: GNU General Public License (GPL)",
          "Operating System :: POSIX :: Linux",
          "Topic :: System :: Archiving :: Packaging",
          "Programming Language :: Python :: 2",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3",
          "Programming Language :: Python :: 3.1",
          "Programming Language :: Python :: 3.2",
      ],
      requires=['debian'],
      provides=['aptirepo'],
      )
|
gpl-2.0
| 7,825,566,279,488,568,000
| 36.685185
| 73
| 0.660934
| false
| 4.078156
| false
| false
| false
|
batoure/ScienceManager
|
App/service/data/provider.py
|
1
|
3148
|
#service.data.provider
from service.data.factory.baseProvider import BaseProvider
#TODO: Add textwrap to a query post processor
from model.workflow import *
class Provider(BaseProvider):
    """Data-access provider for workflow batches, programs and their actions."""

    def __init__(self, log, connectionSettings):
        self._log = log
        #pass to the base connectionSettings
        self._connection_string = ""
        self._connection_settings = connectionSettings
        BaseProvider.__init__(self, self._log, connectionSettings)

    def reserve_next_batch_number(self, workflow, user):
        """Insert a WF_BATCH row for (workflow, user) and return the id of the
        most recently created batch (last value yielded by the scalar query)."""
        batch = None
        with self._dbProviderFactory.create_connection(self._connection_settings) as conn:
            conn.connection_string = self._connection_string
            conn.open()
            with conn.create_command() as cmd:
                cmd.command_timeout = 0
                # NOTE(review): the values appear to be interpolated into the
                # SQL template by command_text; if workflow/user can be
                # user-controlled this is an injection risk -- confirm that
                # command_text parameterizes rather than string-formats.
                cmd.command_text("""
                    INSERT INTO WF_BATCH
                    ( WF_ID, WF_USER)
                    VALUES
                    ({workflow} , {user})
                    """, {'workflow': workflow, 'user': user})
                cmd.execute_non_query()
                cmd.command_text('SELECT BATCH_ID FROM (SELECT MAX(DATE_DT) AS DATE_DT, BATCH_ID FROM WF_BATCH GROUP BY BATCH_ID) AS A',{})
                link_list = cmd.execute_scalar()
                # keep the last id yielded by the scalar query
                for link in link_list:
                    batch = link
            conn.commit()
            conn.close()
        return batch

    def get_program_details(self, workflow, name):
        """Populate *workflow* from the WF_MASTER_L row whose WF_NAME is *name*."""
        with self._dbProviderFactory.create_connection(self._connection_settings) as conn:
            conn.connection_string = self._connection_string
            conn.open()
            with conn.create_command() as cmd:
                cmd.command_timeout = 0
                cmd.command_text("SELECT WF_ID, WF_NAME, WF_FAILURE FROM WF_MASTER_L WHERE WF_NAME = '{workflowname}'", {'workflowname': name})
                for row in cmd.execute_reader():
                    # NOTE(review): workflow.id is assigned twice -- the third
                    # column (WF_FAILURE) overwrites WF_ID.  The last target
                    # likely should be a failure attribute; confirm against
                    # the Workflow model before changing.
                    workflow.id, workflow.name, workflow.id = row
            conn.commit()
            conn.close()
        return workflow

    def get_program_actions(self, work_flow_id):
        """Return the list of Task objects (steps) for the given workflow id."""
        tasks = []
        with self._dbProviderFactory.create_connection(self._connection_settings) as conn:
            conn.connection_string = self._connection_string
            conn.open()
            with conn.create_command() as cmd:
                cmd.command_timeout = 0
                cmd.command_text("SELECT WF_ID, WF_STEPS_L.STEP_ID, WF_STEPS_L.ACTION_NAME, WF_ACTIONS_TYPES_L.ACTION_TYPE_NAME, WF_STEPS_L.ACTION_TYPE_ID, WF_STEPS_L.ACTION_TXT FROM WF_STEPS_L JOIN WF_ACTIONS_TYPES_L ON WF_STEPS_L.ACTION_TYPE_ID = WF_ACTIONS_TYPES_L.ACTION_TYPE_ID WHERE WF_ID = '{id}'",{'id': work_flow_id})
                for row in cmd.execute_reader():
                    task = Task()
                    # WF_ID is read but intentionally discarded into a local
                    wfid, task.number, task.action.name, task.action.type.name, task.action.type.id, task.action.text = row
                    tasks.append(task)
            conn.commit()
            conn.close()
        return tasks
|
mit
| 8,614,728,280,032,674,000
| 47.446154
| 326
| 0.56385
| false
| 4.109661
| false
| false
| false
|
arizvisa/syringe
|
lib/ptypes/__init__.py
|
1
|
1981
|
from . import ptype, parray, pstruct, pbinary, pint, pfloat, pstr
from . import utils, dynamic, provider
dyn, prov = dynamic, provider
__all__ = 'ptype','parray','pstruct','pbinary','pint','pfloat','pstr','dynamic','dyn','prov'
from . import config
Config = config.defaults
## globally changing the ptype provider
def setsource(provider):
    """Install *provider* as the default ptype source and return it."""
    # Probe the required interface up front so a bogus provider fails loudly
    # (AttributeError) before being installed as the global source.
    for required in ('seek', 'consume', 'store'):
        getattr(provider, required)
    ptype.source = provider
    return provider
## globally changing the byte order
def setbyteorder(endianness):
    '''
    Sets the integer byte order to the endianness specified for all non-binary types.
    Can be either config.byteorder.bigendian or config.byteorder.littleendian.
    '''
    # plain loop instead of a discarded list comprehension: this runs purely
    # for its side effect on each module, so building a list was wasteful
    for module in (ptype, pint, pfloat):
        module.setbyteorder(endianness)
## some things people people might find useful
#from ptype import debug, debugrecurse
from .ptype import istype, iscontainer, isinstance, undefined
from .provider import file, memory
from .utils import hexdump
if __name__ == '__main__':
    # smoke test: read a 4-byte ptype back through the memory and empty providers
    import builtins, ptypes
    class a(ptypes.ptype.type):
        length = 4
    data = b'\x41\x41\x41\x41'

    import ctypes
    # expose the buffer's address so the memory provider can read it back
    b = ctypes.cast(ctypes.pointer(ctypes.c_buffer(data,4)), ctypes.c_void_p)

    # memory provider: loading from the buffer address should round-trip the bytes
    ptypes.setsource(ptypes.prov.memory())
    print('ptype-static-memory', builtins.isinstance(ptypes.ptype.source, ptypes.prov.memory))
    print('ptype-instance-memory', builtins.isinstance(ptypes.ptype.type().source, ptypes.prov.memory))
    c = a(offset=b.value).l
    print('type-instance-memory', c.serialize() == data)

    # empty provider: every read yields zero bytes
    ptypes.setsource(ptypes.prov.empty())
    print('ptype-static-empty', builtins.isinstance(ptypes.ptype.source, ptypes.prov.empty))
    print('ptype-instance-empty', builtins.isinstance(ptypes.ptype.type().source, ptypes.prov.empty))
    c = a(offset=b.value).l
    print('type-instance-empty', c.serialize() == b'\x00\x00\x00\x00')

    # restore the default memory provider
    ptypes.setsource(ptypes.prov.memory())
|
bsd-2-clause
| 1,039,058,835,156,158,200
| 36.377358
| 103
| 0.714286
| false
| 3.51865
| false
| false
| false
|
YannickB/odoo-hosting
|
clouder_template_shinken/template.py
|
1
|
11712
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################
try:
from odoo import models, api, modules
except ImportError:
from openerp import models, api, modules
class ClouderNode(models.Model):
    """
    Add methods to manage the shinken specificities.
    """

    _inherit = 'clouder.node'

    @property
    def shinken_configfile(self):
        """
        Property returning the shinken config file.
        """
        return '/usr/local/shinken/etc/hosts/%s.cfg' % self.fulldomain
class ClouderContainer(models.Model):
    """
    Add methods to manage the shinken specificities.
    """
    _inherit = 'clouder.service'

    @property
    def shinken_configfile(self):
        """
        Property returning the shinken config file.
        """
        return '/usr/local/shinken/etc/services/' + self.fullname + '.cfg'

    @api.multi
    def deploy_shinken_node(self, nrpe):
        """
        Deploy the configuration file to watch the node performances.

        Pushes the host template to the shinken node, then substitutes the
        node's IP, name, ssh port and NRPE port in place with sed, and
        reloads the shinken daemon so the new host is picked up.
        """
        node = nrpe.node_id
        self.send(
            modules.get_module_path('clouder_template_shinken') +
            '/res/node-shinken.config', node.shinken_configfile,
            username='shinken')
        self.execute([
            'sed', '-i',
            '"s/IP/' + node.ip + '/g"',
            node.shinken_configfile], username='shinken')
        self.execute([
            'sed', '-i',
            '"s/NAME/' + node.name + '/g"',
            node.shinken_configfile], username='shinken')
        self.execute([
            'sed', '-i',
            '"s/SSHPORT/' + str(node.ssh_port) + '/g"',
            node.shinken_configfile], username='shinken')
        self.execute([
            'sed', '-i',
            '"s/NRPEPORT/' + nrpe.ports['nrpe']['hostport'] + '/g"',
            node.shinken_configfile], username='shinken')
        self.execute(['/usr/local/shinken/bin/init.d/shinken', 'reload'],
                     username='shinken')

    @api.multi
    def purge_shinken_node(self, nrpe):
        """
        Remove the configuration file.
        """
        self.execute(['rm', nrpe.node_id.shinken_configfile],
                     username='shinken')
        self.execute(['/usr/local/shinken/bin/init.d/shinken', 'reload'],
                     username='shinken')

    @api.multi
    def deploy_post(self):
        """
        Add the general configuration files.

        Only runs on the shinken data service: installs the general clouder
        config, sets the sysadmin contact address and removes the sample
        localhost host file.
        """
        super(ClouderContainer, self).deploy_post()
        if self.application_id.type_id.name == 'shinken' \
                and self.application_id.check_tags(['data']):
            self.send(
                modules.get_module_path('clouder_template_shinken') +
                '/res/general-shinken.config',
                '/usr/local/shinken/etc/services/clouder.cfg',
                username='shinken')
            self.execute([
                'sed', '-i', '"s/SYSADMIN_MAIL/' +
                self.email_sysadmin + '/g"',
                '/usr/local/shinken/etc/services/clouder.cfg'],
                username='shinken')
            self.execute(
                ['rm', '/usr/local/shinken/etc/hosts/localhost.cfg'],
                username='shinken')
class ClouderBase(models.Model):
    """
    Add methods to manage the shinken specificities.
    """
    _inherit = 'clouder.base'

    @property
    def shinken_configfile(self):
        """
        Property returning the shinken config file.
        """
        return '/usr/local/shinken/etc/services/' + self.fullname + '.cfg'

    @api.multi
    def deploy_post(self):
        """
        Update odoo configuration.

        For shinken bases: writes this base's domain over the SHINKENDOMAIN
        placeholder in the general config and reloads the daemon.
        """
        res = super(ClouderBase, self).deploy_post()
        if self.application_id.type_id.name == 'shinken':
            self.service_id.execute([
                'sed', '-i', '"s/SHINKENDOMAIN/' +
                self.fulldomain + '/g"',
                '/usr/local/shinken/etc/services/clouder.cfg'],
                username='shinken')
            self.service_id.execute(
                ['/usr/local/shinken/bin/init.d/shinken', 'reload'],
                username='shinken')
        return res

    @api.multi
    def purge_post(self):
        """
        Remove filestore.

        Restores the SHINKENDOMAIN placeholder (inverse of deploy_post) and
        reloads the daemon.
        """
        res = super(ClouderBase, self).purge_post()
        if self.application_id.type_id.name == 'shinken':
            self.service_id.execute([
                'sed', '-i', '"s/' + self.fulldomain + '/SHINKENDOMAIN/g"',
                '/usr/local/shinken/etc/services/clouder.cfg'],
                username='shinken')
            self.service_id.execute(
                ['/usr/local/shinken/bin/init.d/shinken', 'reload'],
                username='shinken')
        return res
class ClouderContainerLink(models.Model):
    """
    Add methods to manage the shinken specificities.
    """
    _inherit = 'clouder.service.link'

    @api.multi
    def deploy_link(self):
        """
        Deploy the configuration file to watch the service.

        Only services with auto_backup enabled get a shinken watch config;
        the template placeholders (backup node IP/port, backup method,
        service name and host) are substituted in place with sed before
        reloading the daemon.
        """
        super(ClouderContainerLink, self).deploy_link()
        if self.target \
                and self.target.application_id.type_id.name == 'shinken':
            if self.service_id.auto_backup:
                config_file = 'service-shinken'
                self.target.send(
                    modules.get_module_path('clouder_template_shinken') +
                    '/res/' + config_file + '.config',
                    self.service_id.shinken_configfile, username='shinken')
                self.target.execute([
                    'sed', '-i',
                    '"s/BACKUPIP/' +
                    self.service_id.backup_ids[0].node_id.ip + '/g"',
                    self.service_id.shinken_configfile], username='shinken')
                self.target.execute([
                    'sed', '-i',
                    '"s/PORT/' +
                    self.service_id.backup_ids[0].ports['nrpe']['hostport'] +
                    '/g"',
                    self.service_id.shinken_configfile], username='shinken')
                self.target.execute([
                    'sed', '-i', '"s/METHOD/' +
                    self.service_id.backup_ids[0].backup_method + '/g"',
                    self.service_id.shinken_configfile], username='shinken')
                self.target.execute([
                    'sed', '-i', '"s/TYPE/service/g"',
                    self.service_id.shinken_configfile], username='shinken')
                # NOTE(review): BACKUPIP was already substituted above, so
                # this second pass should be a no-op -- confirm and drop.
                self.target.execute([
                    'sed', '-i',
                    '"s/BACKUPIP/' +
                    self.service_id.backup_ids[0].node_id.ip + '/g"',
                    self.service_id.shinken_configfile], username='shinken')
                self.target.execute([
                    'sed', '-i',
                    '"s/UNIQUE_NAME/' + self.service_id.fullname + '/g"',
                    self.service_id.shinken_configfile], username='shinken')
                self.target.execute([
                    'sed', '-i',
                    '"s/HOST/' + self.service_id.node_id.name + '/g"',
                    self.service_id.shinken_configfile], username='shinken')
                self.target.execute(
                    ['/usr/local/shinken/bin/init.d/shinken', 'reload'],
                    username='shinken')

    @api.multi
    def purge_link(self):
        """
        Remove the configuration file.
        """
        super(ClouderContainerLink, self).purge_link()
        if self.target \
                and self.target.application_id.type_id.name == 'shinken':
            self.target.execute(['rm', self.service_id.shinken_configfile],
                                username='shinken')
            self.target.execute(
                ['/usr/local/shinken/bin/init.d/shinken', 'reload'],
                username='shinken')
class ClouderBaseLink(models.Model):
    """
    Add methods to manage the shinken specificities for bases.
    """

    _inherit = 'clouder.base.link'

    @api.multi
    def deploy_link(self):
        """
        Deploy the configuration file to watch the base.
        """
        super(ClouderBaseLink, self).deploy_link()
        target = self.target
        if target and target.application_id.type_id.name == 'shinken':
            base = self.base_id
            configfile = base.shinken_configfile
            # Bases without auto-backup use the template variant that
            # skips the backup checks.
            template = 'base-shinken'
            if not base.auto_backup:
                template = 'base-shinken-no-backup'
            target.send(
                modules.get_module_path('clouder_template_shinken') +
                '/res/' + template + '.config',
                configfile, username='shinken')
            backup = base.backup_ids[0]
            # Placeholder -> value pairs substituted into the uploaded
            # template, in the same order as before.
            replacements = [
                ('BACKUPIP', backup.node_id.ip),
                ('PORT', backup.ports['nrpe']['hostport']),
                ('METHOD', backup.backup_method),
                ('TYPE', 'base'),
                ('UNIQUE_NAME', base.fullname),
                ('DATABASES', base.db_names_comma),
                ('BASE', base.name),
                ('DOMAIN', base.fulldomain),
            ]
            for placeholder, value in replacements:
                target.execute([
                    'sed', '-i',
                    '"s/' + placeholder + '/' + value + '/g"',
                    configfile], username='shinken')
            # Reload so Shinken picks up the new configuration.
            target.execute(
                ['/usr/local/shinken/bin/init.d/shinken', 'reload'],
                username='shinken')

    @api.multi
    def purge_link(self):
        """
        Remove the configuration file and reload Shinken.
        """
        super(ClouderBaseLink, self).purge_link()
        target = self.target
        if target and target.application_id.type_id.name == 'shinken':
            target.execute(['rm', self.base_id.shinken_configfile],
                           username='shinken')
            target.execute(
                ['/usr/local/shinken/bin/init.d/shinken', 'reload'],
                username='shinken')
|
agpl-3.0
| -3,848,946,444,569,847,300
| 36.538462
| 79
| 0.518272
| false
| 4.024742
| true
| false
| false
|
agoose77/hivesystem
|
manual/tetris/tetris-8.py
|
1
|
10118
|
import copy
import bee
import dragonfly.pandahive
from dragonfly.grid import bgrid
from dragonfly.canvas import box2d
import dragonfly.std, dragonfly.gen, dragonfly.random, dragonfly.logic
# The seven standard tetromino shapes, each given as the set of occupied
# (x, y) cells of a bgrid.
blocks = (
    bgrid(values=((0, 0), (1, 0), (2, 0), (3, 0))),  # I
    bgrid(values=((0, 1), (0, 0), (1, 0), (2, 0))),  # J
    bgrid(values=((0, 0), (1, 0), (2, 0), (2, 1))),  # L
    bgrid(values=((0, 1), (0, 0), (1, 1), (1, 0))),  # O
    bgrid(values=((0, 0), (1, 0), (1, 1), (2, 1))),  # S
    bgrid(values=((0, 0), (1, 0), (1, 1), (2, 0))),  # T
    bgrid(values=((0, 1), (1, 1), (1, 0), (2, 0))),  # Z
)
# Minimal placeholder grid (bounds 0,0,0,0) used to initialise
# bgrid-typed variables before a real grid is produced.
emptygrid = bgrid(0, 0, 0, 0)
from bee.segments import *
class tetris_init_main(bee.worker):
    """Worker that creates the empty playing field when triggered.

    Declared with the bee.segments DSL: `gridx`/`gridy` are construction
    parameters, `start` is a push-trigger input, and `outp` pushes the
    freshly built bgrid to whatever is connected downstream.
    """
    # Field dimensions, supplied as worker parameters at construction time.
    gridx = variable("int")
    parameter(gridx)
    gridy = variable("int")
    parameter(gridy)
    # Push-trigger input that starts initialisation.
    start = antenna("push", "trigger")
    # Output carrying the newly created main grid.
    outp = output("push", ("object", "bgrid"))
    grid = variable(("object", "bgrid"))
    # Transistor gating `grid` onto `outp`; trig() fires it.
    t_outp = transistor(("object", "bgrid"))
    connect(grid, t_outp)
    connect(t_outp, outp)
    trig = triggerfunc(t_outp)
    @modifier
    def m_start(self):
        # Build an empty field covering x in [0, gridx-1], y in
        # [0, gridy-1] and push it downstream.
        self.grid = bgrid(0, self.gridx - 1, 0, self.gridy - 1)
        self.trig()
    trigger(start, m_start)
class tetris_control(bee.worker):
    """Game-logic worker: moves, rotates and drops the falling block,
    merges landed blocks into the field, clears full lines, and signals
    `dropped` (block landed) and `lost` (game over).

    grid1/grid2 are pull buffers holding the settled field and the
    falling block respectively; get_grids() refreshes both before any
    action is applied.
    """
    # Pull inputs for the settled field and the falling block.
    maingrid = antenna("pull", ("object", "bgrid"))
    blockgrid = antenna("pull", ("object", "bgrid"))
    grid1 = buffer("pull", ("object", "bgrid"))
    connect(maingrid, grid1)
    grid2 = buffer("pull", ("object", "bgrid"))
    connect(blockgrid, grid2)
    # get_grids() pulls fresh values into both buffers.
    get_grids = triggerfunc(grid1, "input")
    trigger(grid1, grid2, "input", "input")
    # Fired when a newly placed block already overlaps the field.
    lost = output("push", "trigger")
    trig_lost = triggerfunc(lost)
    place_init = antenna("push", "trigger")
    @modifier
    def m_place_init(self):
        # Position a freshly selected block: centred horizontally, flush
        # with the top row; if it overlaps settled cells the game is lost.
        self.get_grids()
        dx = int(self.grid1.maxx / 2) - self.grid2.minx
        self.grid2.maxx += dx
        self.grid2.minx += dx
        dy = self.grid1.maxy - self.grid2.maxy
        self.grid2.maxy += dy
        self.grid2.miny += dy
        if self.grid1.overlap(self.grid2):
            self.trig_lost()
    trigger(place_init, m_place_init)
    # Fired whenever the falling block lands and is merged into the field.
    dropped = output("push", "trigger")
    trig_dropped = triggerfunc(dropped)
    move_down = antenna("push", "trigger")
    @modifier
    def m_move_down(self):
        # Try one step down; on collision (floor or settled cells) merge
        # the block, clear full lines and announce `dropped`.
        self.get_grids()
        block = copy.copy(self.grid2)
        block.translate(0, -1)
        if block.miny < 0 or self.grid1.overlap(block):
            self.grid1.merge(self.grid2)
            self.remove_lines()
            self.trig_dropped()
        else:
            self.grid2.translate(0, -1)
    trigger(move_down, m_move_down)
    def move_sideways(self, direction):
        # Shift the block one column left (-1) or right (+1) unless that
        # would leave the field or collide with settled cells.
        self.get_grids()
        block = copy.copy(self.grid2)
        block.translate(direction, 0)
        if block.minx < 0: return
        if block.maxx > self.grid1.maxx: return
        if self.grid1.overlap(block): return
        self.grid2.translate(direction, 0)
    move_left = antenna("push", "trigger")
    @modifier
    def m_move_left(self):
        self.move_sideways(-1)
    trigger(move_left, m_move_left)
    move_right = antenna("push", "trigger")
    @modifier
    def m_move_right(self):
        self.move_sideways(1)
    trigger(move_right, m_move_right)
    def rotate(self, times):
        # Rotate a copy of the block `times` quarter turns, nudge it back
        # inside the side walls if needed, and commit only when it does
        # not overlap settled cells.
        self.get_grids()
        block = copy.copy(self.grid2)
        block.rotate(times)
        if block.minx < 0:
            block.translate(-block.minx, 0)
        if block.maxx > self.grid1.maxx:
            block.translate(self.grid1.maxx - block.maxx, 0)
        if self.grid1.overlap(block): return
        self.grid2.set(block)
    rotate_cw = antenna("push", "trigger")
    @modifier
    def m_rotate_cw(self):
        # Clockwise expressed as three quarter turns the other way.
        self.rotate(3)
    trigger(rotate_cw, m_rotate_cw)
    rotate_ccw = antenna("push", "trigger")
    @modifier
    def m_rotate_ccw(self):
        self.rotate(1)
    trigger(rotate_ccw, m_rotate_ccw)
    drop = antenna("push", "trigger")
    @modifier
    def m_drop(self):
        # Hard drop: slide the block down until it collides, step back up
        # one row, merge it into the field and clear completed lines.
        self.get_grids()
        block = copy.copy(self.grid2)
        while block.miny >= 0 and not self.grid1.overlap(block):
            block.translate(0, -1)
        block.translate(0, 1)
        self.grid1.merge(block)
        self.remove_lines()
        self.trig_dropped()
    trigger(drop, m_drop)
    def remove_lines(self):
        # Scan the settled grid bottom-up; delete each full row and shift
        # every cell above it down by one.
        values = self.grid1.get_values()
        removed = 0
        y = 0
        while y < self.grid1.maxy + 1:
            line = [v for v in values if v[1] == y]
            if len(line) == self.grid1.maxx + 1:
                values = [v for v in values if v[1] != y]
                values = [(v[0], v[1] - 1) if v[1] > y else v for v in values]
                removed += 1
            else:
                y += 1
        if removed: self.grid1.set_values(values)
from bee import antenna, output, connect, attribute, configure, parameter, get_parameter
class tetris_select_block(bee.frame):
    """Frame that picks a random tetromino with a random rotation.

    One push on `select` chooses a block from the `blocks` parameter,
    rotates it a random number of quarter turns, and emits the result on
    `selected`.
    """
    blocks = parameter("object")
    blocks_ = get_parameter("blocks")
    # Wrap the block tuple so the random chooser can draw from it.
    w_blocks = dragonfly.gen.gentuple2(blocks_)
    sel = dragonfly.random.choice()
    connect(w_blocks, sel)
    # do_select latches the randomly chosen block into `chosen`.
    do_select = dragonfly.gen.transistor()
    connect(sel, do_select)
    chosen = dragonfly.std.variable(("object", "bgrid"))(emptygrid)
    chosencontrol = dragonfly.grid.bgridcontrol()
    connect(chosen, chosencontrol.grid)
    connect(do_select, chosen)
    # do_select2 forwards the (now rotated) block to the output.
    do_select2 = dragonfly.gen.transistor()
    connect(chosen, do_select2)
    # Rotation count drawn from the (0, 4) range parameters.
    uptofour = dragonfly.std.variable(("int", "int"))((0, 4))
    randint = dragonfly.random.randint()
    connect(uptofour, randint)
    rotate = dragonfly.std.transistor("int")()
    connect(randint, rotate)
    connect(rotate, chosencontrol.rotate)
    # One push runs the whole pipeline: choose, rotate, emit.
    trigger = dragonfly.std.pushconnector("trigger")()
    connect(trigger, do_select)
    connect(trigger, rotate)
    connect(trigger, do_select2)
    select = antenna(trigger.inp)
    selected = output(do_select2.outp)
class tetris_draw(bee.frame):
    """Frame that renders the game: composes the settled field and the
    falling block into a scratch grid and refreshes the canvas area.
    """
    # Canvas area information taken from the parent hive.
    mainarea_ = attribute("parent", "mainarea")
    mainarea_id_ = attribute("parent", "mainarea_id")
    # Scratch grid that is actually drawn on screen.
    drawgrid = dragonfly.std.variable(("object", "bgrid"))(emptygrid)
    drawgridcontrol = dragonfly.grid.bgridcontrol()
    connect(drawgrid, drawgridcontrol.grid)
    w_draw = dragonfly.canvas.draw3(("object", "bgrid"))(mainarea_id_)
    do_draw = dragonfly.std.transistor(("object", "bgrid"))()
    connect(drawgrid, do_draw)
    connect(do_draw, w_draw)
    update = dragonfly.canvas.update3(mainarea_id_)
    # Per-frame composition: copy the settled grid, then merge the
    # falling block on top of it.
    maingridcontrol = dragonfly.grid.bgridcontrol()
    copy_maingrid = dragonfly.std.transistor(("object", "bgrid"))()
    connect(maingridcontrol.copy, copy_maingrid)
    connect(copy_maingrid, drawgridcontrol.set)
    t_blockgrid = dragonfly.std.transistor(("object", "bgrid"))()
    connect(t_blockgrid, drawgridcontrol.merge)
    # One push on `draw` recomposes the scratch grid and updates the canvas.
    trigger = dragonfly.std.pushconnector("trigger")()
    connect(trigger, copy_maingrid)
    connect(trigger, t_blockgrid)
    connect(trigger, update)
    # External hooks exposed to the parent hive.
    start = antenna(do_draw.trig)
    maingrid = antenna(maingridcontrol.grid)
    blockgrid = antenna(t_blockgrid.inp)
    draw = antenna(trigger.inp)
class parameters(object):
    """Simple attribute bag: parameters(a=1, b=2) yields .a == 1, .b == 2."""

    def __init__(self, **args):
        # Copy every keyword argument onto the instance as an attribute.
        self.__dict__.update(args)
class main(dragonfly.pandahive.pandahive):
    """Top-level Panda3D hive wiring the tetris workers and frames
    together with keyboard input and the periodic fall timer.
    """
    # Static game configuration.
    blocks = blocks
    gridx = 10
    gridy = 20
    mainarea = box2d(100, 150, 225, 375)
    mainarea_id = "main"
    mainarea_parameters = parameters(color=(0.5, 0.5, 0.5, 0))
    scorearea = box2d(170, 100, 80, 40)
    scorearea_id = "score"
    canvas = dragonfly.pandahive.pandacanvas()
    # Expose the configuration values as hive attributes.
    blocks_ = attribute("blocks")
    gridx_ = attribute("gridx")
    gridy_ = attribute("gridy")
    mainarea_ = attribute("mainarea")
    mainarea_parameters_ = attribute("mainarea_parameters")
    mainarea_id_ = attribute("mainarea_id")
    scorearea_ = attribute("scorearea")
    scorearea_id_ = attribute("scorearea_id")
    # Reserve the playing-field area on the canvas.
    c0 = configure("canvas") # must have a lower-alphabet name than "canvas"
    c0.reserve(mainarea_id_, ("object", "bgrid"), box=mainarea_, parameters=mainarea_parameters_)
    # Shared state: the settled field and the falling block.
    maingrid = dragonfly.std.variable(("object", "bgrid"))(emptygrid)
    maingridcontrol = dragonfly.grid.bgridcontrol()
    connect(maingrid, maingridcontrol.grid)
    blockgrid = dragonfly.std.variable(("object", "bgrid"))(emptygrid)
    blockgridcontrol = dragonfly.grid.bgridcontrol()
    connect(blockgrid, blockgridcontrol.grid)
    # Game components.
    select_block = tetris_select_block(blocks=blocks_)
    connect(select_block, blockgridcontrol.set)
    init_main = tetris_init_main(gridx_, gridy_)
    connect(init_main, maingridcontrol.set)
    draw = tetris_draw()
    connect(maingrid, draw.maingrid)
    connect(blockgrid, draw.blockgrid)
    control = tetris_control()
    connect(maingrid, control.maingrid)
    connect(blockgrid, control.blockgrid)
    # Startup: create the field, pick the first block, place and draw it.
    start = dragonfly.sys.startsensor()
    connect(start, select_block)
    connect(start, init_main.start)
    connect(start, control.place_init)
    connect(start, draw.start)
    connect(start, draw.draw)
    # Gravity: move the falling block down every 0.3 seconds.
    period = dragonfly.std.variable("float")(0.3)
    cycle = dragonfly.time.cycle()
    connect(period, cycle)
    connect(cycle, control.move_down)
    connect(cycle, draw.draw)
    # After a block lands, spawn and place the next; exit on game over.
    connect(control.dropped, select_block)
    connect(control.dropped, control.place_init)
    connect(control.lost, "exitactuator")
    # Keyboard controls; each action also triggers a redraw.
    k_left = dragonfly.io.keyboardsensor_trigger("LEFT")
    connect(k_left, control.move_left)
    connect(k_left, draw.draw)
    k_right = dragonfly.io.keyboardsensor_trigger("RIGHT")
    connect(k_right, control.move_right)
    connect(k_right, draw.draw)
    k_return = dragonfly.io.keyboardsensor_trigger("RETURN")
    connect(k_return, control.rotate_cw)
    connect(k_return, draw.draw)
    k_space = dragonfly.io.keyboardsensor_trigger("SPACE")
    connect(k_space, control.rotate_ccw)
    connect(k_space, draw.draw)
    k_down = dragonfly.io.keyboardsensor_trigger("DOWN")
    connect(k_down, control.drop)
    connect(k_down, draw.draw)
    # Re-raise exceptions raised inside the hive event system.
    raiser = bee.raiser()
    connect("evexc", raiser)
# Instantiate the hive, build and place it on the canvas, then enter the
# game loop (blocks until the game exits).
m = main().getinstance()
m.build("main")
m.place()
m.close()
m.init()
m.run()
|
bsd-2-clause
| 6,362,046,269,218,194,000
| 29.113095
| 97
| 0.627496
| false
| 3.119951
| false
| false
| false
|
jmartinezchaine/OpenERP
|
openerp/addons/nan_account_extension/partner.py
|
1
|
6259
|
# -*- encoding: latin-1 -*-
##############################################################################
#
# Copyright (c) 2010 NaN Projectes de Programari Lliure, S.L. All Rights Reserved.
# http://www.NaN-tic.com
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import osv
from osv import fields
from tools.translate import _
class res_partner(osv.osv):
    """Extend res.partner to keep a dedicated receivable/payable account
    in sync with each partner.

    When a partner is flagged as customer (resp. supplier), an account is
    created (or reused) under the company's parent receivable (resp.
    payable) account and stored in the corresponding property field.
    Unflagging the partner removes the account again when it is safe.
    """
    _inherit = 'res.partner'

    def update_account(self, cr, uid, partner_id, account_type, context, force_checked=None):
        """Create, rename or remove the partner's account of the given type.

        :param account_type: 'receivable' or 'payable'; any other value is
            a no-op.
        :param force_checked: when not None, overrides the partner's
            customer/supplier flag (used by unlink() to force removal).
        """
        if account_type not in ('receivable', 'payable'):
            return
        company = self.pool.get('res.users').browse(cr, uid, uid, context).company_id
        # Without a configured parent account there is nothing to manage.
        parent_account = getattr(company, 'parent_%s_account_id' % account_type )
        if not parent_account:
            return
        partner = self.browse(cr, uid, partner_id, context)
        # 'checked' mirrors the checkbox controlling this account type.
        if account_type == 'receivable':
            checked = partner.customer
        else:
            checked = partner.supplier
        partner_account = getattr(partner, 'property_account_%s' % account_type )
        if not force_checked is None:
            checked = force_checked
        if partner_account:
            if checked:
                # If account already exists, just check if we need to update account name.
                if partner_account.name != partner.name:
                    # We will only update account name if no other partner is using the same account.
                    value = 'account.account,%d' % partner_account.id
                    partners = self.pool.get('ir.property').search(cr, uid, [('value_reference','=',value)], context=context)
                    if len(partners) == 1:
                        self.pool.get('account.account').write(cr, uid, [partner_account.id], {
                            'name': partner.name,
                        }, context)
                return
            # If it's not possible to unlink the account we will rollback this change
            # so the property remains the same. Note that we cannot try to unlink first,
            # because in this case it would always fail because of the fact that it's set
            # as the account in the partner.
            cr.execute('SAVEPOINT remove_account')
            self.write(cr, uid, [partner_id], {
                'property_account_%s' % account_type : False,
            }, context)
            try:
                # Unlink may raise an exception if the account is already set in another partner
                # or if it has account moves.
                self.pool.get('account.account').unlink(cr, uid, [partner_account.id], context)
            except osv.except_osv:
                cr.execute('ROLLBACK TO SAVEPOINT remove_account')
                return
            cr.execute('RELEASE SAVEPOINT remove_account')
            return
        if not checked:
            return
        partner_ref = partner.ref or ''
        digits = company.account_digits or 0
        # If no reference was entered, the next code is derived by padding
        # the parent account code with zeros up to the configured digits.
        code = parent_account.code + '0'*(digits - len(parent_account.code + partner_ref)) + partner_ref
        account_id = self.pool.get('account.account').search(cr, uid, [('code','=',code)], context=context)
        if account_id:
            account_id = account_id[0]
        else:
            tipo_cuenta = 15 # 15 = "third parties receivable" account type
            if account_type != 'receivable':
                tipo_cuenta = 16 # 16 = "third parties payable" account type
            # NOTE(review): currency_id is hard-coded to 2 — confirm this
            # matches the intended currency on every target database.
            account_id = self.pool.get('account.account').create(cr, uid, {
                'name': partner.name,
                'code': code,
                'parent_id': parent_account.id,
                'user_type': tipo_cuenta,
                'reconcile': True,
                'type': account_type,
                'currency_id' : 2,
            }, context)
        self.write(cr, uid, [partner_id], {
            'property_account_%s' % account_type : account_id,
        }, context)

    def create(self, cr, uid, vals, context=None):
        """Create the partner, then sync both account types."""
        id = super(res_partner, self).create(cr, uid, vals, context)
        self.update_account(cr, uid, id, 'receivable', context)
        self.update_account(cr, uid, id, 'payable', context)
        return id

    def write(self, cr, uid, ids, vals, context=None):
        """Write, then re-sync accounts when relevant fields changed.

        NOTE(review): changes to 'ref' alone do not trigger a re-sync even
        though update_account() uses partner.ref to build the account code
        — confirm this is intended.
        """
        result = super(res_partner, self).write(cr, uid, ids, vals, context)
        if 'customer' in vals or 'name' in vals:
            for id in ids:
                self.update_account(cr, uid, id, 'receivable', context)
        if 'supplier' in vals or 'name' in vals:
            for id in ids:
                self.update_account(cr, uid, id, 'payable', context)
        return result

    def unlink(self, cr, uid, ids, context=None):
        """Remove the partners' dedicated accounts before deleting them."""
        for id in ids:
            self.update_account(cr, uid, id, 'receivable', context, force_checked = False)
            self.update_account(cr, uid, id, 'payable', context, force_checked = False)
        return super(res_partner, self).unlink(cr, uid, ids, context)
res_partner()  # instantiate to register the model (pre-OpenERP 7 convention)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| -3,422,568,010,201,253,000
| 42.465278
| 125
| 0.588752
| false
| 4.181029
| false
| false
| false
|
algorhythms/LeetCode
|
623 Add One Row to Tree.py
|
1
|
2451
|
#!/usr/bin/python3
"""
Given the root of a binary tree, then value v and depth d, you need to add a row
of nodes with value v at the given depth d. The root node is at depth 1.
The adding rule is: given a positive integer depth d, for each NOT null tree
nodes N in depth d-1, create two tree nodes with value v as N's left subtree
root and right subtree root. And N's original left subtree should be the left
subtree of the new left subtree root, its original right subtree should be the
right subtree of the new right subtree root. If depth d is 1 that means there is
no depth d-1 at all, then create a tree node with value v as the new root of the
whole original tree, and the original tree is the new root's left subtree.
Example 1:
Input:
A binary tree as following:
4
/ \
2 6
/ \ /
3 1 5
v = 1
d = 2
Output:
4
/ \
1 1
/ \
2 6
/ \ /
3 1 5
Example 2:
Input:
A binary tree as following:
4
/
2
/ \
3 1
v = 1
d = 3
Output:
4
/
2
/ \
1 1
/ \
3 1
"""
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        # A new node starts as a leaf; children are attached by the caller.
        self.val, self.left, self.right = x, None, None
class Solution:
    """Insert a row of nodes with value v at depth d.

    Top-down recursion that communicates through return values: each call
    returns the (possibly new) subtree root for the parent to re-attach.
    """

    def addOneRow(self, root: TreeNode, v: int, d: int) -> TreeNode:
        """Return the root of the tree after inserting the new row."""
        return self.add(root, v, d, 1, "left")

    def add(self, node, v, d, cur_d, child) -> TreeNode:
        """Recurse toward depth d; `child` names the side the displaced
        subtree hangs from once the new node is spliced in."""
        if cur_d != d:
            if node is not None:
                node.left = self.add(node.left, v, d, cur_d + 1, "left")
                node.right = self.add(node.right, v, d, cur_d + 1, "right")
            return node
        # Reached the insertion depth: splice a new node above `node`.
        inserted = TreeNode(v)
        if child == "left":
            inserted.left = node
        else:
            inserted.right = node
        return inserted
class Solution2:
    """Insert a row of nodes with value v at depth d, mutating in place."""

    def addOneRow(self, root: TreeNode, v: int, d: int) -> TreeNode:
        """Return the tree with a row of value-v nodes inserted at depth d.

        d == 1 means a new root whose left subtree is the original tree.
        """
        if d == 1:
            node = TreeNode(v)
            node.left = root
            return node
        # BUG FIX: the original called self.add(self, root, v, d, 1),
        # passing `self` twice and raising TypeError for every d > 1.
        self.add(root, v, d, 1)
        return root

    def add(self, node, v, d, cur_d) -> None:
        """Walk down to depth d-1 and splice new children below each node."""
        if not node:
            return
        if cur_d + 1 == d:
            # Insert the new row; the original subtrees hang off the
            # matching side of the inserted nodes.
            left = node.left
            right = node.right
            node.left = TreeNode(v)
            node.left.left = left
            node.right = TreeNode(v)
            node.right.right = right
            # Nothing deeper needs visiting once the row is in place.
            return
        self.add(node.left, v, d, cur_d + 1)
        self.add(node.right, v, d, cur_d + 1)
|
mit
| 1,620,680,028,994,446,600
| 21.694444
| 80
| 0.550796
| false
| 3.366758
| false
| false
| false
|
janusnic/ecommerce
|
ecommerce/extensions/analytics/utils.py
|
1
|
2992
|
from functools import wraps
import logging
from django.conf import settings
logger = logging.getLogger(__name__)
def is_segment_configured():
    """Return True when a Segment write key has been configured."""
    segment_key = settings.SEGMENT_KEY
    return bool(segment_key)
def parse_tracking_context(user):
    """Extract user and client IDs from a user's tracking context.

    Arguments:
        user (User): An instance of the User model.

    Returns:
        Tuple of strings, user_tracking_id and lms_client_id (the latter
        may be None when absent from the context).
    """
    context = user.tracking_context or {}
    lms_client_id = context.get('lms_client_id')
    user_tracking_id = context.get('lms_user_id')
    if user_tracking_id is None:
        # No platform user ID available: fall back to a local ID, prefixed
        # so it can never collide with a real platform user ID that may be
        # tracked at some point.
        user_tracking_id = 'ecommerce-{}'.format(user.id)
    return user_tracking_id, lms_client_id
def log_exceptions(msg):
    """Decorator factory that logs `msg` (with traceback) when the wrapped
    callable raises.

    The exception is re-raised after logging; this module assumes callers
    (e.g. `send_robust` signal dispatch) mute exceptions as needed.
    """
    def _decorate(func):  # pylint: disable=missing-docstring
        @wraps(func)
        def _wrapped(*args, **kwargs):  # pylint: disable=missing-docstring
            try:
                return func(*args, **kwargs)
            except:  # pylint: disable=bare-except
                logger.exception(msg)
                raise
        return _wrapped
    return _decorate
def audit_log(name, **kwargs):
    """Emit an INFO-level message forming part of the audit trail.

    Call this immediately after the event named by `name` has occurred
    (and after any database update). The keyword arguments are rendered
    as comma-separated key="value" pairs, sorted alphabetically by key so
    the output is stable and easy to parse from the application's logs.

    Arguments:
        name (str): The name of the message to log, e.g. 'payment_received'.

    Returns:
        None
    """
    sorted_pairs = sorted(kwargs.items())
    payload = u', '.join(
        u'{k}="{v}"'.format(k=key, v=value) for key, value in sorted_pairs
    )
    message = u'{name}: {payload}'.format(name=name, payload=payload)
    logger.info(message)
|
agpl-3.0
| -5,083,399,345,460,941,000
| 34.619048
| 100
| 0.680816
| false
| 4.323699
| false
| false
| false
|
ministryofjustice/cla_backend
|
cla_backend/apps/legalaid/management/commands/load_contactforresearchmethods.py
|
1
|
1083
|
from django.core.management import BaseCommand
from legalaid.models import ContactResearchMethod, PersonalDetails
import uuid
from cla_common.constants import RESEARCH_CONTACT_VIA
class Command(BaseCommand):
    """Ensure ContactResearchMethod rows exist and migrate legacy data."""

    help = "Creates the contact for research methods default entities AND migrates data from contact_for_research_via field"

    def handle(self, *args, **options):
        # For every known contact method: make sure its lookup row exists,
        # then attach it to any personal details still relying only on the
        # legacy contact_for_research_via field.
        for method_value, _label in RESEARCH_CONTACT_VIA:
            research_method, _created = ContactResearchMethod.objects.get_or_create(
                method=method_value, defaults={"reference": uuid.uuid4()}
            )
            pending = PersonalDetails.objects.filter(
                contact_for_research_via=method_value,
                contact_for_research_methods__isnull=True,
            )
            self.stdout.write(
                "Processing {method}...migrating {count} records from contact_for_research_via field".format(
                    method=method_value, count=pending.count()
                )
            )
            for details in pending:
                details.contact_for_research_methods.add(research_method)
|
mit
| -8,174,029,900,856,867,000
| 44.125
| 124
| 0.649123
| false
| 4.26378
| false
| false
| false
|
shruthiag96/ns3-dev-vns
|
src/lr-wpan/bindings/modulegen__gcc_LP64.py
|
1
|
418364
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Pybindgen error handler that reports failures as warnings instead of aborting."""
    def handle_error(self, wrapper, exception, traceback_):
        # Surface the failure, then return True so pybindgen skips this
        # wrapper and keeps generating the remaining bindings.
        warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
        return True
pybindgen.settings.error_handler = ErrorHandler()  # install the warn-and-continue handler globally
import sys
def module_init():
    """Create and return the root pybindgen module for the ns-3 lr-wpan bindings."""
    return Module('ns.lr_wpan', cpp_namespace='::ns3')
def register_types(module):
root_module = module.get_root()
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanTxOption [enumeration]
module.add_enum('LrWpanTxOption', ['TX_OPTION_NONE', 'TX_OPTION_ACK', 'TX_OPTION_GTS', 'TX_OPTION_INDIRECT'])
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyOption [enumeration]
module.add_enum('LrWpanPhyOption', ['IEEE_802_15_4_868MHZ_BPSK', 'IEEE_802_15_4_915MHZ_BPSK', 'IEEE_802_15_4_868MHZ_ASK', 'IEEE_802_15_4_915MHZ_ASK', 'IEEE_802_15_4_868MHZ_OQPSK', 'IEEE_802_15_4_915MHZ_OQPSK', 'IEEE_802_15_4_2_4GHZ_OQPSK', 'IEEE_802_15_4_INVALID_PHY_OPTION'])
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyEnumeration [enumeration]
module.add_enum('LrWpanPhyEnumeration', ['IEEE_802_15_4_PHY_BUSY', 'IEEE_802_15_4_PHY_BUSY_RX', 'IEEE_802_15_4_PHY_BUSY_TX', 'IEEE_802_15_4_PHY_FORCE_TRX_OFF', 'IEEE_802_15_4_PHY_IDLE', 'IEEE_802_15_4_PHY_INVALID_PARAMETER', 'IEEE_802_15_4_PHY_RX_ON', 'IEEE_802_15_4_PHY_SUCCESS', 'IEEE_802_15_4_PHY_TRX_OFF', 'IEEE_802_15_4_PHY_TX_ON', 'IEEE_802_15_4_PHY_UNSUPPORTED_ATTRIBUTE', 'IEEE_802_15_4_PHY_READ_ONLY', 'IEEE_802_15_4_PHY_UNSPECIFIED'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMcpsDataConfirmStatus [enumeration]
module.add_enum('LrWpanMcpsDataConfirmStatus', ['IEEE_802_15_4_SUCCESS', 'IEEE_802_15_4_TRANSACTION_OVERFLOW', 'IEEE_802_15_4_TRANSACTION_EXPIRED', 'IEEE_802_15_4_CHANNEL_ACCESS_FAILURE', 'IEEE_802_15_4_INVALID_ADDRESS', 'IEEE_802_15_4_INVALID_GTS', 'IEEE_802_15_4_NO_ACK', 'IEEE_802_15_4_COUNTER_ERROR', 'IEEE_802_15_4_FRAME_TOO_LONG', 'IEEE_802_15_4_UNAVAILABLE_KEY', 'IEEE_802_15_4_UNSUPPORTED_SECURITY', 'IEEE_802_15_4_INVALID_PARAMETER'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanAssociationStatus [enumeration]
module.add_enum('LrWpanAssociationStatus', ['ASSOCIATED', 'PAN_AT_CAPACITY', 'PAN_ACCESS_DENIED', 'ASSOCIATED_WITHOUT_ADDRESS', 'DISASSOCIATED'])
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPibAttributeIdentifier [enumeration]
module.add_enum('LrWpanPibAttributeIdentifier', ['phyCurrentChannel', 'phyChannelsSupported', 'phyTransmitPower', 'phyCCAMode', 'phyCurrentPage', 'phyMaxFrameDuration', 'phySHRDuration', 'phySymbolsPerOctet'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMacState [enumeration]
module.add_enum('LrWpanMacState', ['MAC_IDLE', 'MAC_CSMA', 'MAC_SENDING', 'MAC_ACK_PENDING', 'CHANNEL_ACCESS_FAILURE', 'CHANNEL_IDLE', 'SET_PHY_TX_ON'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanAddressMode [enumeration]
module.add_enum('LrWpanAddressMode', ['NO_PANID_ADDR', 'ADDR_MODE_RESERVED', 'SHORT_ADDR', 'EXT_ADDR'])
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelper [class]
module.add_class('AsciiTraceHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice [class]
module.add_class('AsciiTraceHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## buffer.h (module 'network'): ns3::Buffer [class]
module.add_class('Buffer', import_from_module='ns.network')
## buffer.h (module 'network'): ns3::Buffer::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::Buffer'])
## packet.h (module 'network'): ns3::ByteTagIterator [class]
module.add_class('ByteTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::ByteTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagIterator'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList [class]
module.add_class('ByteTagList', import_from_module='ns.network')
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator [class]
module.add_class('Iterator', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList'])
## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::ByteTagList::Iterator'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanEdPower [struct]
module.add_class('LrWpanEdPower')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyDataAndSymbolRates [struct]
module.add_class('LrWpanPhyDataAndSymbolRates')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyPibAttributes [struct]
module.add_class('LrWpanPhyPibAttributes')
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhyPpduHeaderSymbolNumber [struct]
module.add_class('LrWpanPhyPpduHeaderSymbolNumber')
## lr-wpan-spectrum-value-helper.h (module 'lr-wpan'): ns3::LrWpanSpectrumValueHelper [class]
module.add_class('LrWpanSpectrumValueHelper')
## mac16-address.h (module 'network'): ns3::Mac16Address [class]
module.add_class('Mac16Address', import_from_module='ns.network')
## mac16-address.h (module 'network'): ns3::Mac16Address [class]
root_module['ns3::Mac16Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## mac64-address.h (module 'network'): ns3::Mac64Address [class]
module.add_class('Mac64Address', import_from_module='ns.network')
## mac64-address.h (module 'network'): ns3::Mac64Address [class]
root_module['ns3::Mac64Address'].implicitly_converts_to(root_module['ns3::Address'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::McpsDataConfirmParams [struct]
module.add_class('McpsDataConfirmParams')
## lr-wpan-mac.h (module 'lr-wpan'): ns3::McpsDataIndicationParams [struct]
module.add_class('McpsDataIndicationParams')
## lr-wpan-mac.h (module 'lr-wpan'): ns3::McpsDataRequestParams [struct]
module.add_class('McpsDataRequestParams')
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## node-container.h (module 'network'): ns3::NodeContainer [class]
module.add_class('NodeContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## packet-metadata.h (module 'network'): ns3::PacketMetadata [class]
module.add_class('PacketMetadata', import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [struct]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet-metadata.h (module 'network'): ns3::PacketMetadata::Item [enumeration]
module.add_enum('', ['PAYLOAD', 'HEADER', 'TRAILER'], outer_class=root_module['ns3::PacketMetadata::Item'], import_from_module='ns.network')
## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator [class]
module.add_class('ItemIterator', import_from_module='ns.network', outer_class=root_module['ns3::PacketMetadata'])
## packet.h (module 'network'): ns3::PacketTagIterator [class]
module.add_class('PacketTagIterator', import_from_module='ns.network')
## packet.h (module 'network'): ns3::PacketTagIterator::Item [class]
module.add_class('Item', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagIterator'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList [class]
module.add_class('PacketTagList', import_from_module='ns.network')
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData [struct]
module.add_class('TagData', import_from_module='ns.network', outer_class=root_module['ns3::PacketTagList'])
## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData::TagData_e [enumeration]
module.add_enum('TagData_e', ['MAX_SIZE'], outer_class=root_module['ns3::PacketTagList::TagData'], import_from_module='ns.network')
## pcap-file.h (module 'network'): ns3::PcapFile [class]
module.add_class('PcapFile', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [class]
module.add_class('PcapHelper', import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelper [enumeration]
module.add_enum('', ['DLT_NULL', 'DLT_EN10MB', 'DLT_PPP', 'DLT_RAW', 'DLT_IEEE802_11', 'DLT_LINUX_SSL', 'DLT_PRISM_HEADER', 'DLT_IEEE802_11_RADIO', 'DLT_IEEE802_15_4', 'DLT_NETLINK'], outer_class=root_module['ns3::PcapHelper'], import_from_module='ns.network')
## trace-helper.h (module 'network'): ns3::PcapHelperForDevice [class]
module.add_class('PcapHelperForDevice', allow_subclassing=True, import_from_module='ns.network')
## sequence-number.h (module 'network'): ns3::SequenceNumber<unsigned char, signed char> [class]
module.add_class('SequenceNumber8', import_from_module='ns.network')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## tag.h (module 'network'): ns3::Tag [class]
module.add_class('Tag', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## traced-value.h (module 'core'): ns3::TracedValue<ns3::LrWpanMacState> [class]
module.add_class('TracedValue', template_parameters=['ns3::LrWpanMacState'])
## traced-value.h (module 'core'): ns3::TracedValue<ns3::LrWpanPhyEnumeration> [class]
module.add_class('TracedValue', template_parameters=['ns3::LrWpanPhyEnumeration'])
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## chunk.h (module 'network'): ns3::Chunk [class]
module.add_class('Chunk', import_from_module='ns.network', parent=root_module['ns3::ObjectBase'])
## header.h (module 'network'): ns3::Header [class]
module.add_class('Header', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## lr-wpan-helper.h (module 'lr-wpan'): ns3::LrWpanHelper [class]
module.add_class('LrWpanHelper', parent=[root_module['ns3::PcapHelperForDevice'], root_module['ns3::AsciiTraceHelperForDevice']])
## lr-wpan-lqi-tag.h (module 'lr-wpan'): ns3::LrWpanLqiTag [class]
module.add_class('LrWpanLqiTag', parent=root_module['ns3::Tag'])
## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader [class]
module.add_class('LrWpanMacHeader', parent=root_module['ns3::Header'])
## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::LrWpanMacType [enumeration]
module.add_enum('LrWpanMacType', ['LRWPAN_MAC_BEACON', 'LRWPAN_MAC_DATA', 'LRWPAN_MAC_ACKNOWLEDGMENT', 'LRWPAN_MAC_COMMAND', 'LRWPAN_MAC_RESERVED'], outer_class=root_module['ns3::LrWpanMacHeader'])
## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::AddrModeType [enumeration]
module.add_enum('AddrModeType', ['NOADDR', 'RESADDR', 'SHORTADDR', 'EXTADDR'], outer_class=root_module['ns3::LrWpanMacHeader'])
## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::KeyIdModeType [enumeration]
module.add_enum('KeyIdModeType', ['IMPLICIT', 'NOKEYSOURCE', 'SHORTKEYSOURCE', 'LONGKEYSOURCE'], outer_class=root_module['ns3::LrWpanMacHeader'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper [class]
module.add_class('PcapFileWrapper', import_from_module='ns.network', parent=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::LrWpanInterferenceHelper, ns3::empty, ns3::DefaultDeleter<ns3::LrWpanInterferenceHelper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::LrWpanInterferenceHelper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::LrWpanInterferenceHelper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::NixVector', 'ns3::empty', 'ns3::DefaultDeleter<ns3::NixVector>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::OutputStreamWrapper', 'ns3::empty', 'ns3::DefaultDeleter<ns3::OutputStreamWrapper>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Packet', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Packet>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SpectrumSignalParameters, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumSignalParameters> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::SpectrumSignalParameters', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SpectrumSignalParameters>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## spectrum-phy.h (module 'spectrum'): ns3::SpectrumPhy [class]
module.add_class('SpectrumPhy', import_from_module='ns.spectrum', parent=root_module['ns3::Object'])
## spectrum-signal-parameters.h (module 'spectrum'): ns3::SpectrumSignalParameters [struct]
module.add_class('SpectrumSignalParameters', import_from_module='ns.spectrum', parent=root_module['ns3::SimpleRefCount< ns3::SpectrumSignalParameters, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumSignalParameters> >'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## trailer.h (module 'network'): ns3::Trailer [class]
module.add_class('Trailer', import_from_module='ns.network', parent=root_module['ns3::Chunk'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## boolean.h (module 'core'): ns3::BooleanChecker [class]
module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## boolean.h (module 'core'): ns3::BooleanValue [class]
module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## double.h (module 'core'): ns3::DoubleValue [class]
module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## enum.h (module 'core'): ns3::EnumChecker [class]
module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## enum.h (module 'core'): ns3::EnumValue [class]
module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## integer.h (module 'core'): ns3::IntegerValue [class]
module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## lr-wpan-csmaca.h (module 'lr-wpan'): ns3::LrWpanCsmaCa [class]
module.add_class('LrWpanCsmaCa', parent=root_module['ns3::Object'])
## lr-wpan-error-model.h (module 'lr-wpan'): ns3::LrWpanErrorModel [class]
module.add_class('LrWpanErrorModel', parent=root_module['ns3::Object'])
## lr-wpan-interference-helper.h (module 'lr-wpan'): ns3::LrWpanInterferenceHelper [class]
module.add_class('LrWpanInterferenceHelper', parent=root_module['ns3::SimpleRefCount< ns3::LrWpanInterferenceHelper, ns3::empty, ns3::DefaultDeleter<ns3::LrWpanInterferenceHelper> >'])
## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac [class]
module.add_class('LrWpanMac', parent=root_module['ns3::Object'])
## lr-wpan-mac-trailer.h (module 'lr-wpan'): ns3::LrWpanMacTrailer [class]
module.add_class('LrWpanMacTrailer', parent=root_module['ns3::Trailer'])
## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhy [class]
module.add_class('LrWpanPhy', parent=root_module['ns3::SpectrumPhy'])
## lr-wpan-spectrum-signal-parameters.h (module 'lr-wpan'): ns3::LrWpanSpectrumSignalParameters [struct]
module.add_class('LrWpanSpectrumSignalParameters', parent=root_module['ns3::SpectrumSignalParameters'])
## mac16-address.h (module 'network'): ns3::Mac16AddressChecker [class]
module.add_class('Mac16AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac16-address.h (module 'network'): ns3::Mac16AddressValue [class]
module.add_class('Mac16AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac64-address.h (module 'network'): ns3::Mac64AddressChecker [class]
module.add_class('Mac64AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac64-address.h (module 'network'): ns3::Mac64AddressValue [class]
module.add_class('Mac64AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## nix-vector.h (module 'network'): ns3::NixVector [class]
module.add_class('NixVector', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
## node.h (module 'network'): ns3::Node [class]
module.add_class('Node', import_from_module='ns.network', parent=root_module['ns3::Object'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## output-stream-wrapper.h (module 'network'): ns3::OutputStreamWrapper [class]
module.add_class('OutputStreamWrapper', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
## packet.h (module 'network'): ns3::Packet [class]
module.add_class('Packet', import_from_module='ns.network', parent=root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## uinteger.h (module 'core'): ns3::UintegerValue [class]
module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## lr-wpan-net-device.h (module 'lr-wpan'): ns3::LrWpanNetDevice [class]
module.add_class('LrWpanNetDevice', parent=root_module['ns3::NetDevice'])
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PlmeCcaConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PlmeCcaConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PlmeCcaConfirmCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PlmeEdConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PlmeEdConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PlmeEdConfirmCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, unsigned int, ns3::Ptr< ns3::Packet >, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PdDataIndicationCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, unsigned int, ns3::Ptr< ns3::Packet >, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PdDataIndicationCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, unsigned int, ns3::Ptr< ns3::Packet >, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PdDataIndicationCallback&')
typehandlers.add_type_alias(u'ns3::SequenceNumber< short unsigned int, short int >', u'ns3::SequenceNumber16')
typehandlers.add_type_alias(u'ns3::SequenceNumber< short unsigned int, short int >*', u'ns3::SequenceNumber16*')
typehandlers.add_type_alias(u'ns3::SequenceNumber< short unsigned int, short int >&', u'ns3::SequenceNumber16&')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned int, int >', u'ns3::SequenceNumber32')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned int, int >*', u'ns3::SequenceNumber32*')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned int, int >&', u'ns3::SequenceNumber32&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanMacState, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::LrWpanMacStateCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanMacState, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::LrWpanMacStateCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanMacState, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::LrWpanMacStateCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::LrWpanPhyPibAttributes *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PlmeGetAttributeConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::LrWpanPhyPibAttributes *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PlmeGetAttributeConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::LrWpanPhyPibAttributes *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PlmeGetAttributeConfirmCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PdDataConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PdDataConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PdDataConfirmCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataIndicationParams, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::McpsDataIndicationCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataIndicationParams, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::McpsDataIndicationCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataIndicationParams, ns3::Ptr< ns3::Packet >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::McpsDataIndicationCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataConfirmParams, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::McpsDataConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataConfirmParams, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::McpsDataConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::McpsDataConfirmParams, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::McpsDataConfirmCallback&')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned char, signed char >', u'ns3::SequenceNumber8')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned char, signed char >*', u'ns3::SequenceNumber8*')
typehandlers.add_type_alias(u'ns3::SequenceNumber< unsigned char, signed char >&', u'ns3::SequenceNumber8&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PlmeSetAttributeConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PlmeSetAttributeConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::LrWpanPibAttributeIdentifier, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PlmeSetAttributeConfirmCallback&')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', u'ns3::PlmeSetTRXStateConfirmCallback')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >*', u'ns3::PlmeSetTRXStateConfirmCallback*')
typehandlers.add_type_alias(u'ns3::Callback< void, ns3::LrWpanPhyEnumeration, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >&', u'ns3::PlmeSetTRXStateConfirmCallback&')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register types for the ns3::FatalImpl namespace (declares no bound types)."""
    # Resolve the root module for parity with the other namespace registrars;
    # this namespace contributes no classes or typedefs.
    root = module.get_root()
def register_types_ns3_Hash(module):
    """Register types and typedefs for the ns3::Hash namespace."""
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    # Hash-function pointer typedefs; each base signature also gets its
    # pointer ('*') and reference ('&') variants, in that order.
    for sig, alias in ((u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr'),
                       (u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')):
        typehandlers.add_type_alias(sig, alias)
        typehandlers.add_type_alias(sig + u'*', alias + u'*')
        typehandlers.add_type_alias(sig + u'&', alias + u'&')
    ## Register a nested module for the namespace Function
    register_types_ns3_Hash_Function(module.add_cpp_namespace('Function'))
def register_types_ns3_Hash_Function(module):
    """Register the concrete hash-implementation classes in ns3::Hash::Function."""
    root_module = module.get_root()
    # Fnv1a (hash-fnv.h), Hash32/Hash64 (hash-function.h) and Murmur3
    # (hash-murmur3.h) all derive from ns3::Hash::Implementation.
    parent_cls = root_module['ns3::Hash::Implementation']
    for class_name in ('Fnv1a', 'Hash32', 'Hash64', 'Murmur3'):
        module.add_class(class_name, import_from_module='ns.core', parent=parent_cls)
def register_types_ns3_TracedValueCallback(module):
    """Register TracedValueCallback typedefs (value, pointer and reference forms)."""
    root_module = module.get_root()
    # One entry per traced C++ type, in the generator's original emission order.
    # Each callback has the C signature void(*)(T old, T new).
    traced_types = (
        (u'bool', u'Bool'),
        (u'double', u'Double'),
        (u'ns3::SequenceNumber32', u'SequenceNumber32'),
        (u'uint8_t', u'Uint8'),
        (u'uint32_t', u'Uint32'),
        (u'ns3::Time', u'Time'),
        (u'int16_t', u'Int16'),
        (u'ns3::LrWpanPhyEnumeration', u'LrWpanPhyEnumeration'),
        (u'int32_t', u'Int32'),
        (u'int8_t', u'Int8'),
        (u'uint16_t', u'Uint16'),
        (u'ns3::LrWpanMacState', u'LrWpanMacState'),
    )
    for ctype, short_name in traced_types:
        sig = u'void ( * ) ( %s, %s ) *' % (ctype, ctype)
        alias = u'ns3::TracedValueCallback::%s' % short_name
        # Base alias, then the pointer ('*') and reference ('&') variants.
        typehandlers.add_type_alias(sig, alias)
        typehandlers.add_type_alias(sig + u'*', alias + u'*')
        typehandlers.add_type_alias(sig + u'&', alias + u'&')
def register_types_ns3_internal(module):
    """Register types for the ns3::internal namespace (declares no bound types)."""
    # Resolve the root module for parity with the other namespace registrars;
    # this namespace contributes no classes or typedefs.
    root = module.get_root()
def register_methods(root_module):
    """Register the bindings for every class's methods on *root_module*.

    Dispatches to one ``register_Ns3*_methods`` helper per bound class,
    looking each class wrapper up by its fully-qualified C++ name.
    Generated code: the call order mirrors the type-registration order.
    """
    # Value/struct-like types and helpers.
    register_Ns3Address_methods(root_module, root_module['ns3::Address'])
    register_Ns3AsciiTraceHelper_methods(root_module, root_module['ns3::AsciiTraceHelper'])
    register_Ns3AsciiTraceHelperForDevice_methods(root_module, root_module['ns3::AsciiTraceHelperForDevice'])
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3Buffer_methods(root_module, root_module['ns3::Buffer'])
    register_Ns3BufferIterator_methods(root_module, root_module['ns3::Buffer::Iterator'])
    register_Ns3ByteTagIterator_methods(root_module, root_module['ns3::ByteTagIterator'])
    register_Ns3ByteTagIteratorItem_methods(root_module, root_module['ns3::ByteTagIterator::Item'])
    register_Ns3ByteTagList_methods(root_module, root_module['ns3::ByteTagList'])
    register_Ns3ByteTagListIterator_methods(root_module, root_module['ns3::ByteTagList::Iterator'])
    register_Ns3ByteTagListIteratorItem_methods(root_module, root_module['ns3::ByteTagList::Iterator::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
    register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
    register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
    register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
    register_Ns3LrWpanEdPower_methods(root_module, root_module['ns3::LrWpanEdPower'])
    register_Ns3LrWpanPhyDataAndSymbolRates_methods(root_module, root_module['ns3::LrWpanPhyDataAndSymbolRates'])
    register_Ns3LrWpanPhyPibAttributes_methods(root_module, root_module['ns3::LrWpanPhyPibAttributes'])
    register_Ns3LrWpanPhyPpduHeaderSymbolNumber_methods(root_module, root_module['ns3::LrWpanPhyPpduHeaderSymbolNumber'])
    register_Ns3LrWpanSpectrumValueHelper_methods(root_module, root_module['ns3::LrWpanSpectrumValueHelper'])
    register_Ns3Mac16Address_methods(root_module, root_module['ns3::Mac16Address'])
    register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
    register_Ns3Mac64Address_methods(root_module, root_module['ns3::Mac64Address'])
    register_Ns3McpsDataConfirmParams_methods(root_module, root_module['ns3::McpsDataConfirmParams'])
    register_Ns3McpsDataIndicationParams_methods(root_module, root_module['ns3::McpsDataIndicationParams'])
    register_Ns3McpsDataRequestParams_methods(root_module, root_module['ns3::McpsDataRequestParams'])
    register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
    register_Ns3NodeContainer_methods(root_module, root_module['ns3::NodeContainer'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3PacketMetadata_methods(root_module, root_module['ns3::PacketMetadata'])
    register_Ns3PacketMetadataItem_methods(root_module, root_module['ns3::PacketMetadata::Item'])
    register_Ns3PacketMetadataItemIterator_methods(root_module, root_module['ns3::PacketMetadata::ItemIterator'])
    register_Ns3PacketTagIterator_methods(root_module, root_module['ns3::PacketTagIterator'])
    register_Ns3PacketTagIteratorItem_methods(root_module, root_module['ns3::PacketTagIterator::Item'])
    register_Ns3PacketTagList_methods(root_module, root_module['ns3::PacketTagList'])
    register_Ns3PacketTagListTagData_methods(root_module, root_module['ns3::PacketTagList::TagData'])
    register_Ns3PcapFile_methods(root_module, root_module['ns3::PcapFile'])
    register_Ns3PcapHelper_methods(root_module, root_module['ns3::PcapHelper'])
    register_Ns3PcapHelperForDevice_methods(root_module, root_module['ns3::PcapHelperForDevice'])
    register_Ns3SequenceNumber8_methods(root_module, root_module['ns3::SequenceNumber8'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3Tag_methods(root_module, root_module['ns3::Tag'])
    register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    register_Ns3TracedValue__Ns3LrWpanMacState_methods(root_module, root_module['ns3::TracedValue< ns3::LrWpanMacState >'])
    register_Ns3TracedValue__Ns3LrWpanPhyEnumeration_methods(root_module, root_module['ns3::TracedValue< ns3::LrWpanPhyEnumeration >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    # Object hierarchy, headers, helpers and reference-counted bases.
    register_Ns3Chunk_methods(root_module, root_module['ns3::Chunk'])
    register_Ns3Header_methods(root_module, root_module['ns3::Header'])
    register_Ns3LrWpanHelper_methods(root_module, root_module['ns3::LrWpanHelper'])
    register_Ns3LrWpanLqiTag_methods(root_module, root_module['ns3::LrWpanLqiTag'])
    register_Ns3LrWpanMacHeader_methods(root_module, root_module['ns3::LrWpanMacHeader'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    register_Ns3PcapFileWrapper_methods(root_module, root_module['ns3::PcapFileWrapper'])
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3LrWpanInterferenceHelper_Ns3Empty_Ns3DefaultDeleter__lt__ns3LrWpanInterferenceHelper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::LrWpanInterferenceHelper, ns3::empty, ns3::DefaultDeleter<ns3::LrWpanInterferenceHelper> >'])
    register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
    register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
    register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
    register_Ns3SimpleRefCount__Ns3SpectrumSignalParameters_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumSignalParameters__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SpectrumSignalParameters, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumSignalParameters> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3SpectrumPhy_methods(root_module, root_module['ns3::SpectrumPhy'])
    register_Ns3SpectrumSignalParameters_methods(root_module, root_module['ns3::SpectrumSignalParameters'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3Trailer_methods(root_module, root_module['ns3::Trailer'])
    # Attribute accessor/checker/value wrappers and lr-wpan model classes.
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
    register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
    register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
    register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
    register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
    register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
    register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
    register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
    register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
    register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
    register_Ns3LrWpanCsmaCa_methods(root_module, root_module['ns3::LrWpanCsmaCa'])
    register_Ns3LrWpanErrorModel_methods(root_module, root_module['ns3::LrWpanErrorModel'])
    register_Ns3LrWpanInterferenceHelper_methods(root_module, root_module['ns3::LrWpanInterferenceHelper'])
    register_Ns3LrWpanMac_methods(root_module, root_module['ns3::LrWpanMac'])
    register_Ns3LrWpanMacTrailer_methods(root_module, root_module['ns3::LrWpanMacTrailer'])
    register_Ns3LrWpanPhy_methods(root_module, root_module['ns3::LrWpanPhy'])
    register_Ns3LrWpanSpectrumSignalParameters_methods(root_module, root_module['ns3::LrWpanSpectrumSignalParameters'])
    register_Ns3Mac16AddressChecker_methods(root_module, root_module['ns3::Mac16AddressChecker'])
    register_Ns3Mac16AddressValue_methods(root_module, root_module['ns3::Mac16AddressValue'])
    register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
    register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
    register_Ns3Mac64AddressChecker_methods(root_module, root_module['ns3::Mac64AddressChecker'])
    register_Ns3Mac64AddressValue_methods(root_module, root_module['ns3::Mac64AddressValue'])
    register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
    register_Ns3NixVector_methods(root_module, root_module['ns3::NixVector'])
    register_Ns3Node_methods(root_module, root_module['ns3::Node'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OutputStreamWrapper_methods(root_module, root_module['ns3::OutputStreamWrapper'])
    register_Ns3Packet_methods(root_module, root_module['ns3::Packet'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
    register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
    register_Ns3LrWpanNetDevice_methods(root_module, root_module['ns3::LrWpanNetDevice'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3Address_methods(root_module, cls):
    """Register operators, constructors and member functions of ns3::Address."""
    # Comparison and stream-output operators, in the generator's original order.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, (type, buffer, len), and copy constructor.
    cls.add_constructor([])
    cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
    cls.add_constructor([param('ns3::Address const &', 'address')])
    # Member functions as (name, return type, parameters, extra kwargs),
    # preserving the original registration order.
    member_functions = (
        ('CheckCompatible', 'bool',
         [param('uint8_t', 'type'), param('uint8_t', 'len')], {'is_const': True}),
        ('CopyAllFrom', 'uint32_t',
         [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')], {}),
        ('CopyAllTo', 'uint32_t',
         [param('uint8_t *', 'buffer'), param('uint8_t', 'len')], {'is_const': True}),
        ('CopyFrom', 'uint32_t',
         [param('uint8_t const *', 'buffer'), param('uint8_t', 'len')], {}),
        ('CopyTo', 'uint32_t',
         [param('uint8_t *', 'buffer')], {'is_const': True}),
        ('Deserialize', 'void',
         [param('ns3::TagBuffer', 'buffer')], {}),
        ('GetLength', 'uint8_t', [], {'is_const': True}),
        ('GetSerializedSize', 'uint32_t', [], {'is_const': True}),
        ('IsInvalid', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool',
         [param('uint8_t', 'type')], {'is_const': True}),
        ('Register', 'uint8_t', [], {'is_static': True}),
        ('Serialize', 'void',
         [param('ns3::TagBuffer', 'buffer')], {'is_const': True}),
    )
    for method_name, return_type, params, extra in member_functions:
        cls.add_method(method_name, return_type, params, **extra)
    return
def register_Ns3AsciiTraceHelper_methods(root_module, cls):
    """Register constructors and methods of ns3::AsciiTraceHelper on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it. NOTE(review): registration order can influence
    pybindgen overload resolution — preserve it when regenerating.
    """
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper(ns3::AsciiTraceHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelper const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelper::AsciiTraceHelper() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): ns3::Ptr<ns3::OutputStreamWrapper> ns3::AsciiTraceHelper::CreateFileStream(std::string filename, std::_Ios_Openmode filemode=std::ios_base::out) [member function]
    cls.add_method('CreateFileStream',
                   'ns3::Ptr< ns3::OutputStreamWrapper >',
                   [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode', default_value='std::ios_base::out')])
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDequeueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDequeueSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDropSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultDropSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultDropSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultEnqueueSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultEnqueueSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithContext(ns3::Ptr<ns3::OutputStreamWrapper> file, std::string context, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultReceiveSinkWithContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('std::string', 'context'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): static void ns3::AsciiTraceHelper::DefaultReceiveSinkWithoutContext(ns3::Ptr<ns3::OutputStreamWrapper> file, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('DefaultReceiveSinkWithoutContext',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'file'), param('ns3::Ptr< ns3::Packet const >', 'p')],
                   is_static=True)
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromDevice',
                   'std::string',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
    ## trace-helper.h (module 'network'): std::string ns3::AsciiTraceHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromInterfacePair',
                   'std::string',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
    return
def register_Ns3AsciiTraceHelperForDevice_methods(root_module, cls):
    """Register constructors and methods of ns3::AsciiTraceHelperForDevice on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand). The many
    ``EnableAscii`` registrations are C++ overloads distinguished by their
    parameter lists; ``EnableAsciiInternal`` is registered as pure virtual so
    Python subclasses must override it.
    """
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice(ns3::AsciiTraceHelperForDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AsciiTraceHelperForDevice const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::AsciiTraceHelperForDevice::AsciiTraceHelperForDevice() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::Ptr<ns3::NetDevice> nd) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::Ptr< ns3::NetDevice >', 'nd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, std::string ndName, bool explicitFilename=false) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('std::string', 'ndName'), param('bool', 'explicitFilename', default_value='false')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string ndName) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'ndName')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NetDeviceContainer d) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NetDeviceContainer', 'd')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, ns3::NodeContainer n) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('ns3::NodeContainer', 'n')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(std::string prefix, uint32_t nodeid, uint32_t deviceid, bool explicitFilename) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'), param('bool', 'explicitFilename')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAscii(ns3::Ptr<ns3::OutputStreamWrapper> stream, uint32_t nodeid, uint32_t deviceid) [member function]
    cls.add_method('EnableAscii',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(std::string prefix) [member function]
    cls.add_method('EnableAsciiAll',
                   'void',
                   [param('std::string', 'prefix')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiAll(ns3::Ptr<ns3::OutputStreamWrapper> stream) [member function]
    cls.add_method('EnableAsciiAll',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')])
    ## trace-helper.h (module 'network'): void ns3::AsciiTraceHelperForDevice::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
    cls.add_method('EnableAsciiInternal',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Register constructors and methods of ns3::AttributeConstructionList on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Register constructors and attributes of ns3::AttributeConstructionList::Item on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand). The inner
    Item struct exposes three public data members (checker, name, value),
    registered here as writable instance attributes.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3Buffer_methods(root_module, cls):
    """Register constructors and methods of ns3::Buffer on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it. Overloaded names (AddAtEnd, CopyData) are registered
    once per C++ overload.
    """
    ## buffer.h (module 'network'): ns3::Buffer::Buffer() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(uint32_t dataSize, bool initialize) [constructor]
    cls.add_constructor([param('uint32_t', 'dataSize'), param('bool', 'initialize')])
    ## buffer.h (module 'network'): ns3::Buffer::Buffer(ns3::Buffer const & o) [copy constructor]
    cls.add_constructor([param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(uint32_t end) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtEnd(ns3::Buffer const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Buffer const &', 'o')])
    ## buffer.h (module 'network'): void ns3::Buffer::AddAtStart(uint32_t start) [member function]
    cls.add_method('AddAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::Begin() const [member function]
    cls.add_method('Begin',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## buffer.h (module 'network'): ns3::Buffer ns3::Buffer::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Buffer',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator ns3::Buffer::End() const [member function]
    cls.add_method('End',
                   'ns3::Buffer::Iterator',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): uint8_t const * ns3::Buffer::PeekData() const [member function]
    cls.add_method('PeekData',
                   'uint8_t const *',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3BufferIterator_methods(root_module, cls):
    """Register constructors and methods of ns3::Buffer::Iterator on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it (cursor movement, typed reads/writes in network and
    little-endian byte order, and checksum helpers).
    """
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator(ns3::Buffer::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Buffer::Iterator const &', 'arg0')])
    ## buffer.h (module 'network'): ns3::Buffer::Iterator::Iterator() [constructor]
    cls.add_constructor([])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::CalculateIpChecksum(uint16_t size, uint32_t initialChecksum) [member function]
    cls.add_method('CalculateIpChecksum',
                   'uint16_t',
                   [param('uint16_t', 'size'), param('uint32_t', 'initialChecksum')])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetDistanceFrom(ns3::Buffer::Iterator const & o) const [member function]
    cls.add_method('GetDistanceFrom',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator const &', 'o')],
                   is_const=True)
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsEnd() const [member function]
    cls.add_method('IsEnd',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): bool ns3::Buffer::Iterator::IsStart() const [member function]
    cls.add_method('IsStart',
                   'bool',
                   [],
                   is_const=True)
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next() [member function]
    cls.add_method('Next',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Next(uint32_t delta) [member function]
    cls.add_method('Next',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::PeekU8() [member function]
    cls.add_method('PeekU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev() [member function]
    cls.add_method('Prev',
                   'void',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Prev(uint32_t delta) [member function]
    cls.add_method('Prev',
                   'void',
                   [param('uint32_t', 'delta')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(uint8_t * buffer, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Read(ns3::Buffer::Iterator start, uint32_t size) [member function]
    cls.add_method('Read',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadLsbtohU16() [member function]
    cls.add_method('ReadLsbtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadLsbtohU32() [member function]
    cls.add_method('ReadLsbtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadLsbtohU64() [member function]
    cls.add_method('ReadLsbtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadNtohU16() [member function]
    cls.add_method('ReadNtohU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadNtohU32() [member function]
    cls.add_method('ReadNtohU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadNtohU64() [member function]
    cls.add_method('ReadNtohU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint16_t ns3::Buffer::Iterator::ReadU16() [member function]
    cls.add_method('ReadU16',
                   'uint16_t',
                   [])
    ## buffer.h (module 'network'): uint32_t ns3::Buffer::Iterator::ReadU32() [member function]
    cls.add_method('ReadU32',
                   'uint32_t',
                   [])
    ## buffer.h (module 'network'): uint64_t ns3::Buffer::Iterator::ReadU64() [member function]
    cls.add_method('ReadU64',
                   'uint64_t',
                   [])
    ## buffer.h (module 'network'): uint8_t ns3::Buffer::Iterator::ReadU8() [member function]
    cls.add_method('ReadU8',
                   'uint8_t',
                   [])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::Write(ns3::Buffer::Iterator start, ns3::Buffer::Iterator end) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start'), param('ns3::Buffer::Iterator', 'end')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU16(uint16_t data) [member function]
    cls.add_method('WriteHtolsbU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU32(uint32_t data) [member function]
    cls.add_method('WriteHtolsbU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtolsbU64(uint64_t data) [member function]
    cls.add_method('WriteHtolsbU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU16(uint16_t data) [member function]
    cls.add_method('WriteHtonU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU32(uint32_t data) [member function]
    cls.add_method('WriteHtonU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteHtonU64(uint64_t data) [member function]
    cls.add_method('WriteHtonU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU16(uint16_t data) [member function]
    cls.add_method('WriteU16',
                   'void',
                   [param('uint16_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU32(uint32_t data) [member function]
    cls.add_method('WriteU32',
                   'void',
                   [param('uint32_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU64(uint64_t data) [member function]
    cls.add_method('WriteU64',
                   'void',
                   [param('uint64_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data')])
    ## buffer.h (module 'network'): void ns3::Buffer::Iterator::WriteU8(uint8_t data, uint32_t len) [member function]
    cls.add_method('WriteU8',
                   'void',
                   [param('uint8_t', 'data'), param('uint32_t', 'len')])
    return
def register_Ns3ByteTagIterator_methods(root_module, cls):
    """Register constructors and methods of ns3::ByteTagIterator on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it.
    """
    ## packet.h (module 'network'): ns3::ByteTagIterator::ByteTagIterator(ns3::ByteTagIterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator const &', 'arg0')])
    ## packet.h (module 'network'): bool ns3::ByteTagIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item ns3::ByteTagIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::ByteTagIterator::Item',
                   [])
    return
def register_Ns3ByteTagIteratorItem_methods(root_module, cls):
    """Register constructors and methods of ns3::ByteTagIterator::Item on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it.
    """
    ## packet.h (module 'network'): ns3::ByteTagIterator::Item::Item(ns3::ByteTagIterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagIterator::Item const &', 'arg0')])
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetEnd() const [member function]
    cls.add_method('GetEnd',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::ByteTagIterator::Item::GetStart() const [member function]
    cls.add_method('GetStart',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::ByteTagIterator::Item::GetTag(ns3::Tag & tag) const [member function]
    cls.add_method('GetTag',
                   'void',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::TypeId ns3::ByteTagIterator::Item::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    return
def register_Ns3ByteTagList_methods(root_module, cls):
    """Register constructors and methods of ns3::ByteTagList on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it. ``Add`` is overloaded: one form returns a TagBuffer
    for a new tag, the other merges another ByteTagList.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList() [constructor]
    cls.add_constructor([])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::ByteTagList(ns3::ByteTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): ns3::TagBuffer ns3::ByteTagList::Add(ns3::TypeId tid, uint32_t bufferSize, int32_t start, int32_t end) [member function]
    cls.add_method('Add',
                   'ns3::TagBuffer',
                   [param('ns3::TypeId', 'tid'), param('uint32_t', 'bufferSize'), param('int32_t', 'start'), param('int32_t', 'end')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Add(ns3::ByteTagList const & o) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::ByteTagList const &', 'o')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtEnd(int32_t appendOffset) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('int32_t', 'appendOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::AddAtStart(int32_t prependOffset) [member function]
    cls.add_method('AddAtStart',
                   'void',
                   [param('int32_t', 'prependOffset')])
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::Adjust(int32_t adjustment) [member function]
    cls.add_method('Adjust',
                   'void',
                   [param('int32_t', 'adjustment')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator ns3::ByteTagList::Begin(int32_t offsetStart, int32_t offsetEnd) const [member function]
    cls.add_method('Begin',
                   'ns3::ByteTagList::Iterator',
                   [param('int32_t', 'offsetStart'), param('int32_t', 'offsetEnd')],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): void ns3::ByteTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    return
def register_Ns3ByteTagListIterator_methods(root_module, cls):
    """Register constructors and methods of ns3::ByteTagList::Iterator on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Iterator(ns3::ByteTagList::Iterator const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): uint32_t ns3::ByteTagList::Iterator::GetOffsetStart() const [member function]
    cls.add_method('GetOffsetStart',
                   'uint32_t',
                   [],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): bool ns3::ByteTagList::Iterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item ns3::ByteTagList::Iterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::ByteTagList::Iterator::Item',
                   [])
    return
def register_Ns3ByteTagListIteratorItem_methods(root_module, cls):
    """Register constructors and attributes of ns3::ByteTagList::Iterator::Item on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand). Item is a
    plain record of a tag's buffer, byte range (start/end), size, and
    TypeId, exposed here as writable instance attributes.
    """
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::ByteTagList::Iterator::Item const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ByteTagList::Iterator::Item const &', 'arg0')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::Item(ns3::TagBuffer buf) [constructor]
    cls.add_constructor([param('ns3::TagBuffer', 'buf')])
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::buf [variable]
    cls.add_instance_attribute('buf', 'ns3::TagBuffer', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::end [variable]
    cls.add_instance_attribute('end', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::size [variable]
    cls.add_instance_attribute('size', 'uint32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::start [variable]
    cls.add_instance_attribute('start', 'int32_t', is_const=False)
    ## byte-tag-list.h (module 'network'): ns3::ByteTagList::Iterator::Item::tid [variable]
    cls.add_instance_attribute('tid', 'ns3::TypeId', is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Register constructors and methods of ns3::CallbackBase on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it. The Ptr<CallbackImplBase> constructor is protected
    in C++ and is registered with matching visibility.
    """
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl',
                   'ns3::Ptr< ns3::CallbackImplBase >',
                   [],
                   is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    return
def register_Ns3EventId_methods(root_module, cls):
    """Register operators, constructors and methods of ns3::EventId on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it. The ``!=``/``==`` comparison operators are wrapped
    in addition to the member functions.
    """
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('==')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel',
                   'void',
                   [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext',
                   'uint32_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs',
                   'uint64_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint32_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired',
                   'bool',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning',
                   'bool',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl',
                   'ns3::EventImpl *',
                   [],
                   is_const=True)
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Register constructors and methods of ns3::Hasher on *cls*.

    Auto-generated pybindgen binding code (do not edit by hand); each
    ``cls.add_*`` call mirrors the C++ declaration quoted in the ``##``
    comment above it. GetHash32/GetHash64 are each overloaded for a raw
    (buffer, size) pair and for std::string.
    """
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear',
                   'ns3::Hasher &',
                   [])
    return
def register_Ns3Ipv4Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Address (ipv4-address.h, module 'network')."""
    # Operators, registered in the original order.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from host-order uint32, from dotted string.
    for ctor_args in ([param('ns3::Ipv4Address const &', 'arg0')],
                      [],
                      [param('uint32_t', 'address')],
                      [param('char const *', 'address')]):
        cls.add_constructor(ctor_args)
    # Member functions: (name, return type, parameters, extra keyword flags).
    for name, retval, args, extra in (
        ('CombineMask', 'ns3::Ipv4Address',
         [param('ns3::Ipv4Mask const &', 'mask')], {'is_const': True}),
        ('ConvertFrom', 'ns3::Ipv4Address',
         [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('Deserialize', 'ns3::Ipv4Address',
         [param('uint8_t const *', 'buf')], {'is_static': True}),
        ('Get', 'uint32_t', [], {'is_const': True}),
        ('GetAny', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('GetBroadcast', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('GetLoopback', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('GetSubnetDirectedBroadcast', 'ns3::Ipv4Address',
         [param('ns3::Ipv4Mask const &', 'mask')], {'is_const': True}),
        ('GetZero', 'ns3::Ipv4Address', [], {'is_static': True}),
        ('IsBroadcast', 'bool', [], {'is_const': True}),
        ('IsEqual', 'bool',
         [param('ns3::Ipv4Address const &', 'other')], {'is_const': True}),
        ('IsLocalMulticast', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool',
         [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('IsMulticast', 'bool', [], {'is_const': True}),
        ('IsSubnetDirectedBroadcast', 'bool',
         [param('ns3::Ipv4Mask const &', 'mask')], {'is_const': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
        ('Serialize', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        ('Set', 'void', [param('uint32_t', 'address')], {}),
        ('Set', 'void', [param('char const *', 'address')], {}),
    ):
        cls.add_method(name, retval, args, **extra)
    return
def register_Ns3Ipv4Mask_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv4Mask (ipv4-address.h, module 'network')."""
    # Operators, registered in the original order.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, from raw uint32 mask, from "/nn" or dotted string.
    for ctor_args in ([param('ns3::Ipv4Mask const &', 'arg0')],
                      [],
                      [param('uint32_t', 'mask')],
                      [param('char const *', 'mask')]):
        cls.add_constructor(ctor_args)
    # Member functions: (name, return type, parameters, extra keyword flags).
    for name, retval, args, extra in (
        ('Get', 'uint32_t', [], {'is_const': True}),
        ('GetInverse', 'uint32_t', [], {'is_const': True}),
        ('GetLoopback', 'ns3::Ipv4Mask', [], {'is_static': True}),
        ('GetOnes', 'ns3::Ipv4Mask', [], {'is_static': True}),
        ('GetPrefixLength', 'uint16_t', [], {'is_const': True}),
        ('GetZero', 'ns3::Ipv4Mask', [], {'is_static': True}),
        ('IsEqual', 'bool', [param('ns3::Ipv4Mask', 'other')], {'is_const': True}),
        ('IsMatch', 'bool',
         [param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
         {'is_const': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
        ('Set', 'void', [param('uint32_t', 'mask')], {}),
    ):
        cls.add_method(name, retval, args, **extra)
    return
def register_Ns3Ipv6Address_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Address (ipv6-address.h, module 'network')."""
    # Operators, registered in the original order.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, from text, from raw bytes, copy, and from pointer.
    for ctor_args in ([],
                      [param('char const *', 'address')],
                      [param('uint8_t *', 'address')],
                      [param('ns3::Ipv6Address const &', 'addr')],
                      [param('ns3::Ipv6Address const *', 'addr')]):
        cls.add_constructor(ctor_args)
    # Member functions: (name, return type, parameters, extra keyword flags).
    for name, retval, args, extra in (
        ('CombinePrefix', 'ns3::Ipv6Address',
         [param('ns3::Ipv6Prefix const &', 'prefix')], {}),
        ('ConvertFrom', 'ns3::Ipv6Address',
         [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('Deserialize', 'ns3::Ipv6Address',
         [param('uint8_t const *', 'buf')], {'is_static': True}),
        ('GetAllHostsMulticast', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetAllNodesMulticast', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetAllRoutersMulticast', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetAny', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetBytes', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        ('GetIpv4MappedAddress', 'ns3::Ipv4Address', [], {'is_const': True}),
        ('GetLoopback', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetOnes', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('GetZero', 'ns3::Ipv6Address', [], {'is_static': True}),
        ('IsAllHostsMulticast', 'bool', [], {'is_const': True}),
        ('IsAllNodesMulticast', 'bool', [], {'is_const': True}),
        ('IsAllRoutersMulticast', 'bool', [], {'is_const': True}),
        ('IsAny', 'bool', [], {'is_const': True}),
        ('IsDocumentation', 'bool', [], {'is_const': True}),
        ('IsEqual', 'bool',
         [param('ns3::Ipv6Address const &', 'other')], {'is_const': True}),
        ('IsIpv4MappedAddress', 'bool', [], {'is_const': True}),
        ('IsLinkLocal', 'bool', [], {'is_const': True}),
        ('IsLinkLocalMulticast', 'bool', [], {'is_const': True}),
        ('IsLocalhost', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool',
         [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('IsMulticast', 'bool', [], {'is_const': True}),
        ('IsSolicitedMulticast', 'bool', [], {'is_const': True}),
        # EUI-based autoconfiguration helpers: one overload per MAC width.
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
         {'is_static': True}),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
         {'is_static': True}),
        ('MakeAutoconfiguredAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
         {'is_static': True}),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac16Address', 'mac')], {'is_static': True}),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac48Address', 'mac')], {'is_static': True}),
        ('MakeAutoconfiguredLinkLocalAddress', 'ns3::Ipv6Address',
         [param('ns3::Mac64Address', 'mac')], {'is_static': True}),
        ('MakeIpv4MappedAddress', 'ns3::Ipv6Address',
         [param('ns3::Ipv4Address', 'addr')], {'is_static': True}),
        ('MakeSolicitedAddress', 'ns3::Ipv6Address',
         [param('ns3::Ipv6Address', 'addr')], {'is_static': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
        ('Serialize', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        ('Set', 'void', [param('char const *', 'address')], {}),
        ('Set', 'void', [param('uint8_t *', 'address')], {}),
    ):
        cls.add_method(name, retval, args, **extra)
    return
def register_Ns3Ipv6Prefix_methods(root_module, cls):
    """Register Python bindings for ns3::Ipv6Prefix (ipv6-address.h, module 'network')."""
    # Operators, registered in the original order.
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: default, raw bytes, text, prefix length, copy, pointer.
    for ctor_args in ([],
                      [param('uint8_t *', 'prefix')],
                      [param('char const *', 'prefix')],
                      [param('uint8_t', 'prefix')],
                      [param('ns3::Ipv6Prefix const &', 'prefix')],
                      [param('ns3::Ipv6Prefix const *', 'prefix')]):
        cls.add_constructor(ctor_args)
    # Member functions: (name, return type, parameters, extra keyword flags).
    for name, retval, args, extra in (
        ('GetBytes', 'void', [param('uint8_t *', 'buf')], {'is_const': True}),
        ('GetLoopback', 'ns3::Ipv6Prefix', [], {'is_static': True}),
        ('GetOnes', 'ns3::Ipv6Prefix', [], {'is_static': True}),
        ('GetPrefixLength', 'uint8_t', [], {'is_const': True}),
        ('GetZero', 'ns3::Ipv6Prefix', [], {'is_static': True}),
        ('IsEqual', 'bool',
         [param('ns3::Ipv6Prefix const &', 'other')], {'is_const': True}),
        ('IsMatch', 'bool',
         [param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
         {'is_const': True}),
        ('Print', 'void', [param('std::ostream &', 'os')], {'is_const': True}),
    ):
        cls.add_method(name, retval, args, **extra)
    return
def register_Ns3LrWpanEdPower_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanEdPower (lr-wpan-phy.h, module 'lr-wpan')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LrWpanEdPower const &', 'arg0')])
    # Public data members exposed as mutable instance attributes.
    for attr_name, cpp_type in (('averagePower', 'double'),
                                ('lastUpdate', 'ns3::Time'),
                                ('measurementLength', 'ns3::Time')):
        cls.add_instance_attribute(attr_name, cpp_type, is_const=False)
    return
def register_Ns3LrWpanPhyDataAndSymbolRates_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanPhyDataAndSymbolRates (lr-wpan-phy.h, module 'lr-wpan')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LrWpanPhyDataAndSymbolRates const &', 'arg0')])
    # Public data members exposed as mutable instance attributes.
    for attr_name in ('bitRate', 'symbolRate'):
        cls.add_instance_attribute(attr_name, 'double', is_const=False)
    return
def register_Ns3LrWpanPhyPibAttributes_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanPhyPibAttributes (lr-wpan-phy.h, module 'lr-wpan')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LrWpanPhyPibAttributes const &', 'arg0')])
    # PHY PIB attribute fields exposed as mutable instance attributes.
    for attr_name, cpp_type in (
        ('phyCCAMode', 'uint8_t'),
        ('phyChannelsSupported', 'uint32_t [ 32 ]'),
        ('phyCurrentChannel', 'uint8_t'),
        ('phyCurrentPage', 'uint32_t'),
        ('phyMaxFrameDuration', 'uint32_t'),
        ('phySHRDuration', 'uint32_t'),
        ('phySymbolsPerOctet', 'double'),
        ('phyTransmitPower', 'uint8_t'),
    ):
        cls.add_instance_attribute(attr_name, cpp_type, is_const=False)
    return
def register_Ns3LrWpanPhyPpduHeaderSymbolNumber_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanPhyPpduHeaderSymbolNumber (lr-wpan-phy.h, module 'lr-wpan')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LrWpanPhyPpduHeaderSymbolNumber const &', 'arg0')])
    # Public data members exposed as mutable instance attributes.
    for attr_name in ('phr', 'shrPreamble', 'shrSfd'):
        cls.add_instance_attribute(attr_name, 'double', is_const=False)
    return
def register_Ns3LrWpanSpectrumValueHelper_methods(root_module, cls):
    """Register Python bindings for ns3::LrWpanSpectrumValueHelper (lr-wpan-spectrum-value-helper.h, module 'lr-wpan')."""
    # Constructors: copy first, then default, matching the original order.
    cls.add_constructor([param('ns3::LrWpanSpectrumValueHelper const &', 'arg0')])
    cls.add_constructor([])
    # PSD factory methods and the static total-power helper.
    for name, retval, args, extra in (
        ('CreateNoisePowerSpectralDensity', 'ns3::Ptr< ns3::SpectrumValue >',
         [param('uint32_t', 'channel')], {}),
        ('CreateTxPowerSpectralDensity', 'ns3::Ptr< ns3::SpectrumValue >',
         [param('double', 'txPower'), param('uint32_t', 'channel')], {}),
        ('TotalAvgPower', 'double',
         [param('ns3::Ptr< ns3::SpectrumValue const >', 'psd'),
          param('uint32_t', 'channel')],
         {'is_static': True}),
    ):
        cls.add_method(name, retval, args, **extra)
    return
def register_Ns3Mac16Address_methods(root_module, cls):
    """Register Python bindings for ns3::Mac16Address (mac16-address.h, module 'network')."""
    # Operators, registered in the original order.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and parse-from-string.
    for ctor_args in ([param('ns3::Mac16Address const &', 'arg0')],
                      [],
                      [param('char const *', 'str')]):
        cls.add_constructor(ctor_args)
    # Member functions: (name, return type, parameters, extra keyword flags).
    for name, retval, args, extra in (
        ('Allocate', 'ns3::Mac16Address', [], {'is_static': True}),
        ('ConvertFrom', 'ns3::Mac16Address',
         [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('CopyFrom', 'void', [param('uint8_t const *', 'buffer')], {}),
        ('CopyTo', 'void', [param('uint8_t *', 'buffer')], {'is_const': True}),
        ('IsMatchingType', 'bool',
         [param('ns3::Address const &', 'address')], {'is_static': True}),
    ):
        cls.add_method(name, retval, args, **extra)
    return
def register_Ns3Mac48Address_methods(root_module, cls):
    """Register Python bindings for ns3::Mac48Address (mac48-address.h, module 'network')."""
    # Operators, registered in the original order.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and parse-from-string.
    for ctor_args in ([param('ns3::Mac48Address const &', 'arg0')],
                      [],
                      [param('char const *', 'str')]):
        cls.add_constructor(ctor_args)
    # Member functions: (name, return type, parameters, extra keyword flags).
    for name, retval, args, extra in (
        ('Allocate', 'ns3::Mac48Address', [], {'is_static': True}),
        ('ConvertFrom', 'ns3::Mac48Address',
         [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('CopyFrom', 'void', [param('uint8_t const *', 'buffer')], {}),
        ('CopyTo', 'void', [param('uint8_t *', 'buffer')], {'is_const': True}),
        ('GetBroadcast', 'ns3::Mac48Address', [], {'is_static': True}),
        # GetMulticast is overloaded for IPv4 and IPv6 group addresses.
        ('GetMulticast', 'ns3::Mac48Address',
         [param('ns3::Ipv4Address', 'address')], {'is_static': True}),
        ('GetMulticast', 'ns3::Mac48Address',
         [param('ns3::Ipv6Address', 'address')], {'is_static': True}),
        ('GetMulticast6Prefix', 'ns3::Mac48Address', [], {'is_static': True}),
        ('GetMulticastPrefix', 'ns3::Mac48Address', [], {'is_static': True}),
        ('IsBroadcast', 'bool', [], {'is_const': True}),
        ('IsGroup', 'bool', [], {'is_const': True}),
        ('IsMatchingType', 'bool',
         [param('ns3::Address const &', 'address')], {'is_static': True}),
    ):
        cls.add_method(name, retval, args, **extra)
    return
def register_Ns3Mac64Address_methods(root_module, cls):
    """Register Python bindings for ns3::Mac64Address (mac64-address.h, module 'network')."""
    # Operators, registered in the original order.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    # Constructors: copy, default, and parse-from-string.
    for ctor_args in ([param('ns3::Mac64Address const &', 'arg0')],
                      [],
                      [param('char const *', 'str')]):
        cls.add_constructor(ctor_args)
    # Member functions: (name, return type, parameters, extra keyword flags).
    for name, retval, args, extra in (
        ('Allocate', 'ns3::Mac64Address', [], {'is_static': True}),
        ('ConvertFrom', 'ns3::Mac64Address',
         [param('ns3::Address const &', 'address')], {'is_static': True}),
        ('CopyFrom', 'void', [param('uint8_t const *', 'buffer')], {}),
        ('CopyTo', 'void', [param('uint8_t *', 'buffer')], {'is_const': True}),
        ('IsMatchingType', 'bool',
         [param('ns3::Address const &', 'address')], {'is_static': True}),
    ):
        cls.add_method(name, retval, args, **extra)
    return
def register_Ns3McpsDataConfirmParams_methods(root_module, cls):
    """Register Python bindings for ns3::McpsDataConfirmParams (lr-wpan-mac.h, module 'lr-wpan')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::McpsDataConfirmParams const &', 'arg0')])
    # Public data members exposed as mutable instance attributes.
    for attr_name, cpp_type in (('m_msduHandle', 'uint8_t'),
                                ('m_status', 'ns3::LrWpanMcpsDataConfirmStatus')):
        cls.add_instance_attribute(attr_name, cpp_type, is_const=False)
    return
def register_Ns3McpsDataIndicationParams_methods(root_module, cls):
    """Bind ns3::McpsDataIndicationParams (lr-wpan-mac.h, module 'lr-wpan').

    Registers the default and copy constructors plus the public data members
    of the MCPS-DATA.indication parameter struct as writable instance
    attributes.
    """
    # Constructors: default, then copy (same registration order as scanned).
    cls.add_constructor([])
    cls.add_constructor([param('ns3::McpsDataIndicationParams const &', 'arg0')])
    # Public fields, each exposed as a writable instance attribute.
    for field_name, field_type in (
        ('m_dsn', 'uint8_t'),
        ('m_dstAddr', 'ns3::Mac16Address'),
        ('m_dstAddrMode', 'uint8_t'),
        ('m_dstPanId', 'uint16_t'),
        ('m_mpduLinkQuality', 'uint8_t'),
        ('m_srcAddr', 'ns3::Mac16Address'),
        ('m_srcAddrMode', 'uint8_t'),
        ('m_srcPanId', 'uint16_t'),
    ):
        cls.add_instance_attribute(field_name, field_type, is_const=False)
    return
def register_Ns3McpsDataRequestParams_methods(root_module, cls):
    """Bind ns3::McpsDataRequestParams (lr-wpan-mac.h, module 'lr-wpan').

    Registers the copy and default constructors plus the public data members
    of the MCPS-DATA.request parameter struct as writable instance attributes.
    """
    # Constructors: copy first, then default (same registration order as scanned).
    cls.add_constructor([param('ns3::McpsDataRequestParams const &', 'arg0')])
    cls.add_constructor([])
    # Public fields, each exposed as a writable instance attribute.
    for field_name, field_type in (
        ('m_dstAddr', 'ns3::Mac16Address'),
        ('m_dstAddrMode', 'ns3::LrWpanAddressMode'),
        ('m_dstPanId', 'uint16_t'),
        ('m_msduHandle', 'uint8_t'),
        ('m_srcAddrMode', 'ns3::LrWpanAddressMode'),
        ('m_txOptions', 'uint8_t'),
    ):
        cls.add_instance_attribute(field_name, field_type, is_const=False)
    return
def register_Ns3NetDeviceContainer_methods(root_module, cls):
    """Register Python bindings for ns3::NetDeviceContainer (net-device-container.h).

    Adds the constructors, the Add() overloads, the Begin()/End() iterator
    accessors, and the Get()/GetN() element accessors.
    NOTE(review): this looks like pybindgen-generated registration code --
    prefer regenerating the bindings over hand-editing; the registration
    order mirrors the scanned C++ API.
    """
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
    cls.add_constructor([])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
    cls.add_constructor([param('std::string', 'devName')])
    ## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::NetDeviceContainer', 'other')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'deviceName')])
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
                   [],
                   is_const=True)
    ## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
                   [],
                   is_const=True)
    ## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    return
def register_Ns3NodeContainer_methods(root_module, cls):
    """Register Python bindings for ns3::NodeContainer (node-container.h).

    Adds the constructors (including the 2- to 5-container concatenating
    overloads), the Add() overloads, Create(), the Begin()/End() iterator
    accessors, the Get()/GetN() element accessors and the static GetGlobal().
    NOTE(review): this looks like pybindgen-generated registration code --
    prefer regenerating the bindings over hand-editing; the registration
    order mirrors the scanned C++ API.
    """
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'arg0')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer() [constructor]
    cls.add_constructor([])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::Ptr<ns3::Node> node) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(std::string nodeName) [constructor]
    cls.add_constructor([param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd')])
    ## node-container.h (module 'network'): ns3::NodeContainer::NodeContainer(ns3::NodeContainer const & a, ns3::NodeContainer const & b, ns3::NodeContainer const & c, ns3::NodeContainer const & d, ns3::NodeContainer const & e) [constructor]
    cls.add_constructor([param('ns3::NodeContainer const &', 'a'), param('ns3::NodeContainer const &', 'b'), param('ns3::NodeContainer const &', 'c'), param('ns3::NodeContainer const &', 'd'), param('ns3::NodeContainer const &', 'e')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::NodeContainer other) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::NodeContainer', 'other')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Add(std::string nodeName) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'nodeName')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::Begin() const [member function]
    cls.add_method('Begin',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
                   [],
                   is_const=True)
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n) [member function]
    cls.add_method('Create',
                   'void',
                   [param('uint32_t', 'n')])
    ## node-container.h (module 'network'): void ns3::NodeContainer::Create(uint32_t n, uint32_t systemId) [member function]
    cls.add_method('Create',
                   'void',
                   [param('uint32_t', 'n'), param('uint32_t', 'systemId')])
    ## node-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Node>*,std::vector<ns3::Ptr<ns3::Node>, std::allocator<ns3::Ptr<ns3::Node> > > > ns3::NodeContainer::End() const [member function]
    cls.add_method('End',
                   '__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Node > const, std::vector< ns3::Ptr< ns3::Node > > >',
                   [],
                   is_const=True)
    ## node-container.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NodeContainer::Get(uint32_t i) const [member function]
    cls.add_method('Get',
                   'ns3::Ptr< ns3::Node >',
                   [param('uint32_t', 'i')],
                   is_const=True)
    ## node-container.h (module 'network'): static ns3::NodeContainer ns3::NodeContainer::GetGlobal() [member function]
    cls.add_method('GetGlobal',
                   'ns3::NodeContainer',
                   [],
                   is_static=True)
    ## node-container.h (module 'network'): uint32_t ns3::NodeContainer::GetN() const [member function]
    cls.add_method('GetN',
                   'uint32_t',
                   [],
                   is_const=True)
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectBase (object-base.h, module 'core').

    Adds the constructors, the attribute get/set API (with fail-safe
    variants), the trace connect/disconnect API, the TypeId accessors
    (GetInstanceTypeId is pure virtual), and the protected ConstructSelf()
    and NotifyConstructionCompleted() hooks.
    NOTE(review): this looks like pybindgen-generated registration code --
    prefer regenerating the bindings over hand-editing; the visibility and
    virtual-ness flags mirror the scanned C++ API exactly.
    """
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Bind ns3::ObjectDeleter (object.h, module 'core').

    Registers the default and copy constructors plus the static
    Delete(ns3::Object *) member function.
    """
    # Constructors: default, then copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # static void Delete(ns3::Object * object)
    cls.add_method('Delete', 'void', [param('ns3::Object *', 'object')], is_static=True)
    return
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register Python bindings for ns3::ObjectFactory (object-factory.h, module 'core').

    Adds the output-stream operator, the constructors, Create(), GetTypeId(),
    Set() and the three SetTypeId() overloads.
    NOTE(review): this looks like pybindgen-generated registration code --
    prefer regenerating the bindings over hand-editing; the overload
    registration order mirrors the scanned C++ API.
    """
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
    cls.add_constructor([param('std::string', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::Object >',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('std::string', 'tid')])
    return
def register_Ns3PacketMetadata_methods(root_module, cls):
    """Register Python bindings for ns3::PacketMetadata (packet-metadata.h).

    Adds the constructors, the header/trailer add-remove API, fragment
    creation and trimming, (de)serialization, the static Enable() /
    EnableChecking() switches, and the BeginItem()/GetUid() accessors.
    NOTE(review): this looks like pybindgen-generated registration code --
    prefer regenerating the bindings over hand-editing; the registration
    order mirrors the scanned C++ API.
    """
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(uint64_t uid, uint32_t size) [constructor]
    cls.add_constructor([param('uint64_t', 'uid'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::PacketMetadata(ns3::PacketMetadata const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddAtEnd(ns3::PacketMetadata const & o) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::PacketMetadata const &', 'o')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddPaddingAtEnd(uint32_t end) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::AddTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::PacketMetadata::BeginItem(ns3::Buffer buffer) const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [param('ns3::Buffer', 'buffer')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): ns3::PacketMetadata ns3::PacketMetadata::CreateFragment(uint32_t start, uint32_t end) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::PacketMetadata',
                   [param('uint32_t', 'start'), param('uint32_t', 'end')],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Deserialize(uint8_t const * buffer, uint32_t size) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::Enable() [member function]
    cls.add_method('Enable',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): static void ns3::PacketMetadata::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): uint64_t ns3::PacketMetadata::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtEnd(uint32_t end) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'end')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveAtStart(uint32_t start) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'start')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveHeader(ns3::Header const & header, uint32_t size) [member function]
    cls.add_method('RemoveHeader',
                   'void',
                   [param('ns3::Header const &', 'header'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): void ns3::PacketMetadata::RemoveTrailer(ns3::Trailer const & trailer, uint32_t size) [member function]
    cls.add_method('RemoveTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer'), param('uint32_t', 'size')])
    ## packet-metadata.h (module 'network'): uint32_t ns3::PacketMetadata::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3PacketMetadataItem_methods(root_module, cls):
    """Bind ns3::PacketMetadata::Item (packet-metadata.h, module 'network').

    Registers the default and copy constructors plus the public data members
    of the metadata item record as writable instance attributes.
    """
    # Constructors: default, then copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketMetadata::Item const &', 'arg0')])
    # Public fields, each exposed as a writable instance attribute.
    for field_name, field_type in (
        ('current', 'ns3::Buffer::Iterator'),
        ('currentSize', 'uint32_t'),
        ('currentTrimedFromEnd', 'uint32_t'),
        ('currentTrimedFromStart', 'uint32_t'),
        ('isFragment', 'bool'),
        ('tid', 'ns3::TypeId'),
    ):
        cls.add_instance_attribute(field_name, field_type, is_const=False)
    return
def register_Ns3PacketMetadataItemIterator_methods(root_module, cls):
    """Bind ns3::PacketMetadata::ItemIterator (packet-metadata.h, module 'network').

    Registers the copy constructor, the (PacketMetadata const *, Buffer)
    constructor, and the HasNext()/Next() iteration methods.
    """
    # Constructors: copy, then (metadata, buffer).
    cls.add_constructor([param('ns3::PacketMetadata::ItemIterator const &', 'arg0')])
    cls.add_constructor([param('ns3::PacketMetadata const *', 'metadata'), param('ns3::Buffer', 'buffer')])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # ns3::PacketMetadata::Item Next()
    cls.add_method('Next', 'ns3::PacketMetadata::Item', [])
    return
def register_Ns3PacketTagIterator_methods(root_module, cls):
    """Bind ns3::PacketTagIterator (packet.h, module 'network').

    Registers the copy constructor and the HasNext()/Next() iteration
    methods.
    """
    # Copy constructor.
    cls.add_constructor([param('ns3::PacketTagIterator const &', 'arg0')])
    # bool HasNext() const
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # ns3::PacketTagIterator::Item Next()
    cls.add_method('Next', 'ns3::PacketTagIterator::Item', [])
    return
def register_Ns3PacketTagIteratorItem_methods(root_module, cls):
    """Bind ns3::PacketTagIterator::Item (packet.h, module 'network').

    Registers the copy constructor plus the const GetTag()/GetTypeId()
    accessors.
    """
    # Copy constructor.
    cls.add_constructor([param('ns3::PacketTagIterator::Item const &', 'arg0')])
    # void GetTag(ns3::Tag & tag) const
    cls.add_method('GetTag', 'void', [param('ns3::Tag &', 'tag')], is_const=True)
    # ns3::TypeId GetTypeId() const
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_const=True)
    return
def register_Ns3PacketTagList_methods(root_module, cls):
    """Register Python bindings for ns3::PacketTagList (packet-tag-list.h).

    Adds the constructors and the tag-list API: Add(), Head(), Peek(),
    Remove(), RemoveAll() and Replace().
    NOTE(review): this looks like pybindgen-generated registration code --
    prefer regenerating the bindings over hand-editing; the registration
    order mirrors the scanned C++ API.
    """
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList() [constructor]
    cls.add_constructor([])
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::PacketTagList(ns3::PacketTagList const & o) [copy constructor]
    cls.add_constructor([param('ns3::PacketTagList const &', 'o')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::Add(ns3::Tag const & tag) const [member function]
    cls.add_method('Add',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): ns3::PacketTagList::TagData const * ns3::PacketTagList::Head() const [member function]
    cls.add_method('Head',
                   'ns3::PacketTagList::TagData const *',
                   [],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Peek(ns3::Tag & tag) const [member function]
    cls.add_method('Peek',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Remove(ns3::Tag & tag) [member function]
    cls.add_method('Remove',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet-tag-list.h (module 'network'): void ns3::PacketTagList::RemoveAll() [member function]
    cls.add_method('RemoveAll',
                   'void',
                   [])
    ## packet-tag-list.h (module 'network'): bool ns3::PacketTagList::Replace(ns3::Tag & tag) [member function]
    cls.add_method('Replace',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    return
def register_Ns3PacketTagListTagData_methods(root_module, cls):
    """Bind ns3::PacketTagList::TagData (packet-tag-list.h, module 'network').

    Registers the default and copy constructors plus the public data members
    of the tag-data node as writable instance attributes.
    """
    # Constructors: default, then copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::PacketTagList::TagData const &', 'arg0')])
    # Public fields, each exposed as a writable instance attribute.
    for field_name, field_type in (
        ('count', 'uint32_t'),
        ('data', 'uint8_t [ 21 ]'),
        ('next', 'ns3::PacketTagList::TagData *'),
        ('tid', 'ns3::TypeId'),
    ):
        cls.add_instance_attribute(field_name, field_type, is_const=False)
    return
def register_Ns3PcapFile_methods(root_module, cls):
    """Register Python bindings for ns3::PcapFile (pcap-file.h, module 'network').

    Adds the constructor, file lifecycle methods (Open/Init/Close/Clear),
    the static Diff() comparison helper, the pcap-header accessors, the
    Read() method, the three Write() overloads, and the SNAPLEN_DEFAULT /
    ZONE_DEFAULT static attributes.
    NOTE(review): this looks like pybindgen-generated registration code --
    prefer regenerating the bindings over hand-editing; the default-value
    strings must stay exact C++ expressions.
    """
    ## pcap-file.h (module 'network'): ns3::PcapFile::PcapFile() [constructor]
    cls.add_constructor([])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Clear() [member function]
    cls.add_method('Clear',
                   'void',
                   [])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Close() [member function]
    cls.add_method('Close',
                   'void',
                   [])
    ## pcap-file.h (module 'network'): static bool ns3::PcapFile::Diff(std::string const & f1, std::string const & f2, uint32_t & sec, uint32_t & usec, uint32_t & packets, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT) [member function]
    cls.add_method('Diff',
                   'bool',
                   [param('std::string const &', 'f1'), param('std::string const &', 'f2'), param('uint32_t &', 'sec'), param('uint32_t &', 'usec'), param('uint32_t &', 'packets'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT')],
                   is_static=True)
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::Eof() const [member function]
    cls.add_method('Eof',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::Fail() const [member function]
    cls.add_method('Fail',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetMagic() [member function]
    cls.add_method('GetMagic',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSigFigs() [member function]
    cls.add_method('GetSigFigs',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): uint32_t ns3::PcapFile::GetSnapLen() [member function]
    cls.add_method('GetSnapLen',
                   'uint32_t',
                   [])
    ## pcap-file.h (module 'network'): bool ns3::PcapFile::GetSwapMode() [member function]
    cls.add_method('GetSwapMode',
                   'bool',
                   [])
    ## pcap-file.h (module 'network'): int32_t ns3::PcapFile::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset',
                   'int32_t',
                   [])
    ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor',
                   'uint16_t',
                   [])
    ## pcap-file.h (module 'network'): uint16_t ns3::PcapFile::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor',
                   'uint16_t',
                   [])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Init(uint32_t dataLinkType, uint32_t snapLen=ns3::PcapFile::SNAPLEN_DEFAULT, int32_t timeZoneCorrection=ns3::PcapFile::ZONE_DEFAULT, bool swapMode=false) [member function]
    cls.add_method('Init',
                   'void',
                   [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='ns3::PcapFile::SNAPLEN_DEFAULT'), param('int32_t', 'timeZoneCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT'), param('bool', 'swapMode', default_value='false')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open',
                   'void',
                   [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Read(uint8_t * const data, uint32_t maxBytes, uint32_t & tsSec, uint32_t & tsUsec, uint32_t & inclLen, uint32_t & origLen, uint32_t & readLen) [member function]
    cls.add_method('Read',
                   'void',
                   [param('uint8_t * const', 'data'), param('uint32_t', 'maxBytes'), param('uint32_t &', 'tsSec'), param('uint32_t &', 'tsUsec'), param('uint32_t &', 'inclLen'), param('uint32_t &', 'origLen'), param('uint32_t &', 'readLen')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, uint8_t const * const data, uint32_t totalLen) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('uint8_t const * const', 'data'), param('uint32_t', 'totalLen')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file.h (module 'network'): void ns3::PcapFile::Write(uint32_t tsSec, uint32_t tsUsec, ns3::Header const & header, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('uint32_t', 'tsSec'), param('uint32_t', 'tsUsec'), param('ns3::Header const &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file.h (module 'network'): ns3::PcapFile::SNAPLEN_DEFAULT [variable]
    cls.add_static_attribute('SNAPLEN_DEFAULT', 'uint32_t const', is_const=True)
    ## pcap-file.h (module 'network'): ns3::PcapFile::ZONE_DEFAULT [variable]
    cls.add_static_attribute('ZONE_DEFAULT', 'int32_t const', is_const=True)
    return
def register_Ns3PcapHelper_methods(root_module, cls):
    """Register Python bindings for ns3::PcapHelper (trace-helper.h, module 'network').

    Adds the constructors, CreateFile() and the two filename-derivation
    helpers (GetFilenameFromDevice / GetFilenameFromInterfacePair).
    NOTE(review): this looks like pybindgen-generated registration code --
    prefer regenerating the bindings over hand-editing; the default-value
    strings must stay exact C++ expressions.
    """
    ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper(ns3::PcapHelper const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::PcapHelper const &', 'arg0')])
    ## trace-helper.h (module 'network'): ns3::PcapHelper::PcapHelper() [constructor]
    cls.add_constructor([])
    ## trace-helper.h (module 'network'): ns3::Ptr<ns3::PcapFileWrapper> ns3::PcapHelper::CreateFile(std::string filename, std::_Ios_Openmode filemode, uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=0) [member function]
    cls.add_method('CreateFile',
                   'ns3::Ptr< ns3::PcapFileWrapper >',
                   [param('std::string', 'filename'), param('std::_Ios_Openmode', 'filemode'), param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='0')])
    ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromDevice(std::string prefix, ns3::Ptr<ns3::NetDevice> device, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromDevice',
                   'std::string',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'useObjectNames', default_value='true')])
    ## trace-helper.h (module 'network'): std::string ns3::PcapHelper::GetFilenameFromInterfacePair(std::string prefix, ns3::Ptr<ns3::Object> object, uint32_t interface, bool useObjectNames=true) [member function]
    cls.add_method('GetFilenameFromInterfacePair',
                   'std::string',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::Object >', 'object'), param('uint32_t', 'interface'), param('bool', 'useObjectNames', default_value='true')])
    return
def register_Ns3PcapHelperForDevice_methods(root_module, cls):
    """Register Python bindings for ns3::PcapHelperForDevice (trace-helper.h, module 'network')."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::PcapHelperForDevice const &', 'arg0')])
    cls.add_constructor([])
    # The five EnablePcap overloads differ only in their parameter lists;
    # register them from a table, keeping the generator's original order.
    enable_pcap_signatures = [
        # (prefix, NetDevice, promiscuous = false, explicitFilename = false)
        [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'),
         param('bool', 'promiscuous', default_value='false'),
         param('bool', 'explicitFilename', default_value='false')],
        # (prefix, device name, promiscuous = false, explicitFilename = false)
        [param('std::string', 'prefix'), param('std::string', 'ndName'),
         param('bool', 'promiscuous', default_value='false'),
         param('bool', 'explicitFilename', default_value='false')],
        # (prefix, NetDeviceContainer, promiscuous = false)
        [param('std::string', 'prefix'), param('ns3::NetDeviceContainer', 'd'),
         param('bool', 'promiscuous', default_value='false')],
        # (prefix, NodeContainer, promiscuous = false)
        [param('std::string', 'prefix'), param('ns3::NodeContainer', 'n'),
         param('bool', 'promiscuous', default_value='false')],
        # (prefix, node id, device id, promiscuous = false)
        [param('std::string', 'prefix'), param('uint32_t', 'nodeid'), param('uint32_t', 'deviceid'),
         param('bool', 'promiscuous', default_value='false')],
    ]
    for signature in enable_pcap_signatures:
        cls.add_method('EnablePcap', 'void', signature)
    # void EnablePcapAll(prefix, promiscuous = false)
    cls.add_method('EnablePcapAll', 'void',
                   [param('std::string', 'prefix'),
                    param('bool', 'promiscuous', default_value='false')])
    # Pure-virtual hook implemented by concrete device helpers.
    cls.add_method('EnablePcapInternal', 'void',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'),
                    param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3SequenceNumber8_methods(root_module, cls):
    """Register Python bindings for ns3::SequenceNumber8 (SequenceNumber<unsigned char, signed char>)."""
    seq8 = root_module['ns3::SequenceNumber8']
    cls.add_binary_comparison_operator('!=')
    # Arithmetic: seq + seq, seq + offset, and the in-place/binary offset forms.
    cls.add_binary_numeric_operator('+', seq8, seq8, param('ns3::SequenceNumber< unsigned char, signed char > const &', u'right'))
    cls.add_binary_numeric_operator('+', seq8, seq8, param('signed char', u'right'))
    cls.add_inplace_numeric_operator('+=', param('signed char', u'right'))
    cls.add_binary_numeric_operator('-', seq8, seq8, param('signed char', u'right'))
    cls.add_inplace_numeric_operator('-=', param('signed char', u'right'))
    # Remaining rich comparisons, in the generator's original order.
    for op in ('<', '<=', '==', '>', '>='):
        cls.add_binary_comparison_operator(op)
    # Default, value, and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('unsigned char', 'value')])
    cls.add_constructor([param('ns3::SequenceNumber< unsigned char, signed char > const &', 'value')])
    # unsigned char GetValue() const
    cls.add_method('GetValue', 'unsigned char', [], is_const=True)
    return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Register bindings for SimpleRefCount<Object, ObjectBase, ObjectDeleter> (simple-ref-count.h, module 'core')."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
    # static void Cleanup()
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3Simulator_methods(root_module, cls):
    """Register Python bindings for ns3::Simulator (simulator.h, module 'core').

    Every public Simulator member in this chunk is a static function, so they
    are all registered uniformly from a (name, return type, parameters) table,
    preserving the generator's original registration order.
    """
    # Copy constructor (the only non-static binding).
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    static_methods = [
        ('Cancel', 'void', [param('ns3::EventId const &', 'id')]),
        ('Destroy', 'void', []),
        ('GetContext', 'uint32_t', []),
        ('GetDelayLeft', 'ns3::Time', [param('ns3::EventId const &', 'id')]),
        ('GetImplementation', 'ns3::Ptr< ns3::SimulatorImpl >', []),
        ('GetMaximumSimulationTime', 'ns3::Time', []),
        ('GetSystemId', 'uint32_t', []),
        ('IsExpired', 'bool', [param('ns3::EventId const &', 'id')]),
        ('IsFinished', 'bool', []),
        ('Now', 'ns3::Time', []),
        ('Remove', 'void', [param('ns3::EventId const &', 'id')]),
        ('SetImplementation', 'void', [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')]),
        ('SetScheduler', 'void', [param('ns3::ObjectFactory', 'schedulerFactory')]),
        ('Stop', 'void', []),                                  # Stop() overload
        ('Stop', 'void', [param('ns3::Time const &', 'delay')]),  # Stop(delay) overload
    ]
    for method_name, return_type, parameters in static_methods:
        cls.add_method(method_name, return_type, parameters, is_static=True)
    return
def register_Ns3Tag_methods(root_module, cls):
    """Register Python bindings for the abstract ns3::Tag base class (tag.h, module 'network').

    pybindgen-generated: each ``##`` comment below records the original C++
    declaration that the following registration call mirrors.  The
    Serialize/Deserialize/Print/GetSerializedSize members are pure virtual,
    so this class is only instantiable through concrete subclasses.
    """
    ## tag.h (module 'network'): ns3::Tag::Tag() [constructor]
    cls.add_constructor([])
    ## tag.h (module 'network'): ns3::Tag::Tag(ns3::Tag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Tag const &', 'arg0')])
    ## tag.h (module 'network'): void ns3::Tag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize', 
                   'void', 
                   [param('ns3::TagBuffer', 'i')], 
                   is_pure_virtual=True, is_virtual=True)
    ## tag.h (module 'network'): uint32_t ns3::Tag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize', 
                   'uint32_t', 
                   [], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): static ns3::TypeId ns3::Tag::GetTypeId() [member function]
    cls.add_method('GetTypeId', 
                   'ns3::TypeId', 
                   [], 
                   is_static=True)
    ## tag.h (module 'network'): void ns3::Tag::Print(std::ostream & os) const [member function]
    cls.add_method('Print', 
                   'void', 
                   [param('std::ostream &', 'os')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## tag.h (module 'network'): void ns3::Tag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize', 
                   'void', 
                   [param('ns3::TagBuffer', 'i')], 
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3TagBuffer_methods(root_module, cls):
    """Register Python bindings for ns3::TagBuffer (tag-buffer.h, module 'network')."""
    # Copy constructor and the raw (start, end) pointer-range constructor.
    cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
    cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
    # Every member function here is a plain (non-const, non-virtual) method;
    # register them uniformly from a table, in the generator's original order.
    plain_methods = [
        ('CopyFrom', 'void', [param('ns3::TagBuffer', 'o')]),
        ('Read', 'void', [param('uint8_t *', 'buffer'), param('uint32_t', 'size')]),
        ('ReadDouble', 'double', []),
        ('ReadU16', 'uint16_t', []),
        ('ReadU32', 'uint32_t', []),
        ('ReadU64', 'uint64_t', []),
        ('ReadU8', 'uint8_t', []),
        ('TrimAtEnd', 'void', [param('uint32_t', 'trim')]),
        ('Write', 'void', [param('uint8_t const *', 'buffer'), param('uint32_t', 'size')]),
        ('WriteDouble', 'void', [param('double', 'v')]),
        ('WriteU16', 'void', [param('uint16_t', 'data')]),
        ('WriteU32', 'void', [param('uint32_t', 'data')]),
        ('WriteU64', 'void', [param('uint64_t', 'v')]),
        ('WriteU8', 'void', [param('uint8_t', 'v')]),
    ]
    for method_name, return_type, parameters in plain_methods:
        cls.add_method(method_name, return_type, parameters)
    return
def register_Ns3TimeWithUnit_methods(root_module, cls):
    """Register Python bindings for ns3::TimeWithUnit (nstime.h, module 'core')."""
    # Printable via operator<<.
    cls.add_output_stream_operator()
    # Copy constructor and the (time, unit) value constructor.
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
    return
def register_Ns3TracedValue__Ns3LrWpanMacState_methods(root_module, cls):
    """Register bindings for ns3::TracedValue<ns3::LrWpanMacState> (traced-value.h, module 'core')."""
    # Default, copy, and value-initialising constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TracedValue< ns3::LrWpanMacState > const &', 'o')])
    cls.add_constructor([param('ns3::LrWpanMacState const &', 'v')])
    # Trace-sink hookup: each stem comes in a context-path variant and a
    # bare variant, registered in the generator's original order.
    for stem in ('Connect', 'Disconnect'):
        cls.add_method(stem,
                       'void',
                       [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
        cls.add_method(stem + 'WithoutContext',
                       'void',
                       [param('ns3::CallbackBase const &', 'cb')])
    # Value accessors.
    cls.add_method('Get', 'ns3::LrWpanMacState', [], is_const=True)
    cls.add_method('Set', 'void', [param('ns3::LrWpanMacState const &', 'v')])
    return
def register_Ns3TracedValue__Ns3LrWpanPhyEnumeration_methods(root_module, cls):
    """Register bindings for ns3::TracedValue<ns3::LrWpanPhyEnumeration> (traced-value.h, module 'core')."""
    # Default, copy, and value-initialising constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TracedValue< ns3::LrWpanPhyEnumeration > const &', 'o')])
    cls.add_constructor([param('ns3::LrWpanPhyEnumeration const &', 'v')])
    # Trace-sink hookup: each stem comes in a context-path variant and a
    # bare variant, registered in the generator's original order.
    for stem in ('Connect', 'Disconnect'):
        cls.add_method(stem,
                       'void',
                       [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
        cls.add_method(stem + 'WithoutContext',
                       'void',
                       [param('ns3::CallbackBase const &', 'cb')])
    # Value accessors.
    cls.add_method('Get', 'ns3::LrWpanPhyEnumeration', [], is_const=True)
    cls.add_method('Set', 'void', [param('ns3::LrWpanPhyEnumeration const &', 'v')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register Python bindings for ns3::TypeId (type-id.h, module 'core').

    pybindgen-generated: each ``##`` comment records the C++ declaration
    mirrored by the following call.  Left byte-identical apart from
    documentation because the registrations mix several flag combinations
    (is_const / is_static / deprecated / transfer_ownership) that must match
    the scanned API exactly.
    """
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('==')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('AddAttribute', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
    cls.add_method('AddTraceSource', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')], 
                   deprecated=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function]
    cls.add_method('AddTraceSource', 
                   'ns3::TypeId', 
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
    cls.add_method('GetAttribute', 
                   'ns3::TypeId::AttributeInformation', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
    cls.add_method('GetAttributeFullName', 
                   'std::string', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor', 
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName', 
                   'std::string', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent', 
                   'ns3::TypeId', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
    cls.add_method('GetRegistered', 
                   'ns3::TypeId', 
                   [param('uint32_t', 'i')], 
                   is_static=True)
    ## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN', 
                   'uint32_t', 
                   [], 
                   is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize', 
                   'std::size_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
    cls.add_method('GetTraceSource', 
                   'ns3::TypeId::TraceSourceInformation', 
                   [param('uint32_t', 'i')], 
                   is_const=True)
    ## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN', 
                   'uint32_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid', 
                   'uint16_t', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation', 
                   'ns3::TypeId', 
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf', 
                   'bool', 
                   [param('ns3::TypeId', 'other')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName', 
                   'bool', 
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)], 
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
    cls.add_method('LookupByHash', 
                   'ns3::TypeId', 
                   [param('uint32_t', 'hash')], 
                   is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe', 
                   'bool', 
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')], 
                   is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName', 
                   'ns3::TypeId', 
                   [param('std::string', 'name')], 
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName', 
                   'ns3::Ptr< ns3::TraceSourceAccessor const >', 
                   [param('std::string', 'name')], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation', 
                   'bool', 
                   [], 
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue', 
                   'bool', 
                   [param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName', 
                   'ns3::TypeId', 
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent', 
                   'ns3::TypeId', 
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize', 
                   'ns3::TypeId', 
                   [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
    cls.add_method('SetUid', 
                   'void', 
                   [param('uint16_t', 'tid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register bindings for the ns3::TypeId::AttributeInformation struct (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    # Public data members, all writable from Python, in the generator's order.
    for field_name, field_type in (
            ('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >'),
            ('checker', 'ns3::Ptr< ns3::AttributeChecker const >'),
            ('flags', 'uint32_t'),
            ('help', 'std::string'),
            ('initialValue', 'ns3::Ptr< ns3::AttributeValue const >'),
            ('name', 'std::string'),
            ('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >')):
        cls.add_instance_attribute(field_name, field_type, is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register bindings for the ns3::TypeId::TraceSourceInformation struct (type-id.h, module 'core')."""
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    # Public data members, all writable from Python, in the generator's order.
    for field_name, field_type in (
            ('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >'),
            ('callback', 'std::string'),
            ('help', 'std::string'),
            ('name', 'std::string')):
        cls.add_instance_attribute(field_name, field_type, is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register bindings for ns3::empty (empty.h, module 'core')."""
    # Only the default and copy constructors exist on this tag type.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register bindings for ns3::int64x64_t (int64x64-double.h, module 'core').

    Operators, constructors and methods are registered in the generator's
    original order; uniform runs are expressed as loops, constructing a fresh
    param(...) descriptor for every call.
    """
    i64 = root_module['ns3::int64x64_t']
    # Binary arithmetic against another int64x64_t: '*', '+', '-' ...
    for op in ('*', '+', '-'):
        cls.add_binary_numeric_operator(op, i64, i64, param('ns3::int64x64_t const &', u'right'))
    # ... then unary negation and division.
    cls.add_unary_numeric_operator('-')
    cls.add_binary_numeric_operator('/', i64, i64, param('ns3::int64x64_t const &', u'right'))
    for op in ('<', '>', '!='):
        cls.add_binary_comparison_operator(op)
    # In-place arithmetic.
    for op in ('*=', '+=', '-=', '/='):
        cls.add_inplace_numeric_operator(op, param('ns3::int64x64_t const &', u'right'))
    cls.add_output_stream_operator()
    for op in ('<=', '==', '>='):
        cls.add_binary_comparison_operator(op)
    # Default constructor, one converting constructor per supported C numeric
    # type, the (hi, lo) fixed-point constructor, and the copy constructor.
    cls.add_constructor([])
    for ctype in ('double', 'long double', 'int', 'long int', 'long long int',
                  'unsigned int', 'long unsigned int', 'long long unsigned int'):
        cls.add_constructor([param(ctype, 'v')])
    cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    # Const accessors for the double value and the hi/lo fixed-point words.
    for getter_name, return_type in (('GetDouble', 'double'),
                                     ('GetHigh', 'int64_t'),
                                     ('GetLow', 'uint64_t')):
        cls.add_method(getter_name, return_type, [], is_const=True)
    # static int64x64_t Invert(uint64_t v)
    cls.add_method('Invert', 'ns3::int64x64_t', [param('uint64_t', 'v')], is_static=True)
    # void MulByInvert(int64x64_t const & o)
    cls.add_method('MulByInvert', 'void', [param('ns3::int64x64_t const &', 'o')])
    # Static 'implementation' marker describing the backing impl type.
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return
def register_Ns3Chunk_methods(root_module, cls):
    """Register bindings for the abstract packet-chunk base class ns3::Chunk.

    Adds the two constructors, the pure-virtual Deserialize/Print interface
    and the static GetTypeId.  ``cls`` is the binding class wrapper being
    populated; ``root_module`` is unused here but kept for the uniform
    register_* signature.  NOTE(review): this file appears to be
    pybindgen-generated output — prefer regenerating over hand-editing.
    """
    ## chunk.h (module 'network'): ns3::Chunk::Chunk() [constructor]
    cls.add_constructor([])
    ## chunk.h (module 'network'): ns3::Chunk::Chunk(ns3::Chunk const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Chunk const &', 'arg0')])
    ## chunk.h (module 'network'): uint32_t ns3::Chunk::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    ## chunk.h (module 'network'): static ns3::TypeId ns3::Chunk::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## chunk.h (module 'network'): void ns3::Chunk::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Header_methods(root_module, cls):
    """Register bindings for ns3::Header (network module).

    Adds operator<< output support, both constructors, and the pure-virtual
    Deserialize/GetSerializedSize/Print/Serialize interface plus the static
    GetTypeId.  NOTE(review): appears to be pybindgen-generated output —
    prefer regenerating over hand-editing.
    """
    cls.add_output_stream_operator()
    ## header.h (module 'network'): ns3::Header::Header() [constructor]
    cls.add_constructor([])
    ## header.h (module 'network'): ns3::Header::Header(ns3::Header const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Header const &', 'arg0')])
    ## header.h (module 'network'): uint32_t ns3::Header::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_virtual=True)
    ## header.h (module 'network'): uint32_t ns3::Header::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): static ns3::TypeId ns3::Header::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## header.h (module 'network'): void ns3::Header::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## header.h (module 'network'): void ns3::Header::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3LrWpanHelper_methods(root_module, cls):
    """Register bindings for ns3::LrWpanHelper (lr-wpan module).

    Covers constructors, channel get/set, mobility attachment, device
    Install, PAN association, log enabling, the static enum printers,
    stream assignment, and the private virtual pcap/ascii trace hooks.
    NOTE(review): appears to be pybindgen-generated output — prefer
    regenerating over hand-editing.
    """
    ## lr-wpan-helper.h (module 'lr-wpan'): ns3::LrWpanHelper::LrWpanHelper() [constructor]
    cls.add_constructor([])
    ## lr-wpan-helper.h (module 'lr-wpan'): ns3::LrWpanHelper::LrWpanHelper(bool useMultiModelSpectrumChannel) [constructor]
    cls.add_constructor([param('bool', 'useMultiModelSpectrumChannel')])
    ## lr-wpan-helper.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumChannel> ns3::LrWpanHelper::GetChannel() [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::SpectrumChannel >',
                   [])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::SetChannel(ns3::Ptr<ns3::SpectrumChannel> channel) [member function]
    cls.add_method('SetChannel',
                   'void',
                   [param('ns3::Ptr< ns3::SpectrumChannel >', 'channel')])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::SetChannel(std::string channelName) [member function]
    cls.add_method('SetChannel',
                   'void',
                   [param('std::string', 'channelName')])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::AddMobility(ns3::Ptr<ns3::LrWpanPhy> phy, ns3::Ptr<ns3::MobilityModel> m) [member function]
    cls.add_method('AddMobility',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanPhy >', 'phy'), param('ns3::Ptr< ns3::MobilityModel >', 'm')])
    ## lr-wpan-helper.h (module 'lr-wpan'): ns3::NetDeviceContainer ns3::LrWpanHelper::Install(ns3::NodeContainer c) [member function]
    cls.add_method('Install',
                   'ns3::NetDeviceContainer',
                   [param('ns3::NodeContainer', 'c')])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::AssociateToPan(ns3::NetDeviceContainer c, uint16_t panId) [member function]
    cls.add_method('AssociateToPan',
                   'void',
                   [param('ns3::NetDeviceContainer', 'c'), param('uint16_t', 'panId')])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::EnableLogComponents() [member function]
    cls.add_method('EnableLogComponents',
                   'void',
                   [])
    ## lr-wpan-helper.h (module 'lr-wpan'): static std::string ns3::LrWpanHelper::LrWpanPhyEnumerationPrinter(ns3::LrWpanPhyEnumeration e) [member function]
    cls.add_method('LrWpanPhyEnumerationPrinter',
                   'std::string',
                   [param('ns3::LrWpanPhyEnumeration', 'e')],
                   is_static=True)
    ## lr-wpan-helper.h (module 'lr-wpan'): static std::string ns3::LrWpanHelper::LrWpanMacStatePrinter(ns3::LrWpanMacState e) [member function]
    cls.add_method('LrWpanMacStatePrinter',
                   'std::string',
                   [param('ns3::LrWpanMacState', 'e')],
                   is_static=True)
    ## lr-wpan-helper.h (module 'lr-wpan'): int64_t ns3::LrWpanHelper::AssignStreams(ns3::NetDeviceContainer c, int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('ns3::NetDeviceContainer', 'c'), param('int64_t', 'stream')])
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::EnablePcapInternal(std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool promiscuous, bool explicitFilename) [member function]
    cls.add_method('EnablePcapInternal',
                   'void',
                   [param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'promiscuous'), param('bool', 'explicitFilename')],
                   visibility='private', is_virtual=True)
    ## lr-wpan-helper.h (module 'lr-wpan'): void ns3::LrWpanHelper::EnableAsciiInternal(ns3::Ptr<ns3::OutputStreamWrapper> stream, std::string prefix, ns3::Ptr<ns3::NetDevice> nd, bool explicitFilename) [member function]
    cls.add_method('EnableAsciiInternal',
                   'void',
                   [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream'), param('std::string', 'prefix'), param('ns3::Ptr< ns3::NetDevice >', 'nd'), param('bool', 'explicitFilename')],
                   visibility='private', is_virtual=True)
    return
def register_Ns3LrWpanLqiTag_methods(root_module, cls):
    """Register bindings for ns3::LrWpanLqiTag, a packet tag carrying an
    8-bit LQI value (Get/Set take uint8_t; Serialize/Deserialize use
    ns3::TagBuffer).  NOTE(review): appears to be pybindgen-generated
    output — prefer regenerating over hand-editing.
    """
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): ns3::LrWpanLqiTag::LrWpanLqiTag(ns3::LrWpanLqiTag const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanLqiTag const &', 'arg0')])
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): ns3::LrWpanLqiTag::LrWpanLqiTag() [constructor]
    cls.add_constructor([])
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): ns3::LrWpanLqiTag::LrWpanLqiTag(uint8_t lqi) [constructor]
    cls.add_constructor([param('uint8_t', 'lqi')])
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): void ns3::LrWpanLqiTag::Deserialize(ns3::TagBuffer i) [member function]
    cls.add_method('Deserialize',
                   'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_virtual=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): uint8_t ns3::LrWpanLqiTag::Get() const [member function]
    cls.add_method('Get',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): ns3::TypeId ns3::LrWpanLqiTag::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): uint32_t ns3::LrWpanLqiTag::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanLqiTag::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): void ns3::LrWpanLqiTag::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): void ns3::LrWpanLqiTag::Serialize(ns3::TagBuffer i) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::TagBuffer', 'i')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-lqi-tag.h (module 'lr-wpan'): void ns3::LrWpanLqiTag::Set(uint8_t lqi) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint8_t', 'lqi')])
    return
def register_Ns3LrWpanMacHeader_methods(root_module, cls):
    """Register bindings for ns3::LrWpanMacHeader (lr-wpan module).

    Covers the three constructors, Header serialization overrides
    (Deserialize/Serialize/GetSerializedSize/Print), and the full set of
    getters/setters and Is*/Set* flag helpers for addressing, frame control
    and auxiliary security fields.  The registration sequence mirrors the
    generated declaration order — keep it intact.  NOTE(review): appears to
    be pybindgen-generated output — prefer regenerating over hand-editing.
    """
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::LrWpanMacHeader(ns3::LrWpanMacHeader const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanMacHeader const &', 'arg0')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::LrWpanMacHeader() [constructor]
    cls.add_constructor([])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::LrWpanMacHeader(ns3::LrWpanMacHeader::LrWpanMacType wpanMacType, uint8_t seqNum) [constructor]
    cls.add_constructor([param('ns3::LrWpanMacHeader::LrWpanMacType', 'wpanMacType'), param('uint8_t', 'seqNum')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacHeader::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetDstAddrMode() const [member function]
    cls.add_method('GetDstAddrMode',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint16_t ns3::LrWpanMacHeader::GetDstPanId() const [member function]
    cls.add_method('GetDstPanId',
                   'uint16_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::Mac64Address ns3::LrWpanMacHeader::GetExtDstAddr() const [member function]
    cls.add_method('GetExtDstAddr',
                   'ns3::Mac64Address',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::Mac64Address ns3::LrWpanMacHeader::GetExtSrcAddr() const [member function]
    cls.add_method('GetExtSrcAddr',
                   'ns3::Mac64Address',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint16_t ns3::LrWpanMacHeader::GetFrameControl() const [member function]
    cls.add_method('GetFrameControl',
                   'uint16_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetFrameVer() const [member function]
    cls.add_method('GetFrameVer',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacHeader::GetFrmCounter() const [member function]
    cls.add_method('GetFrmCounter',
                   'uint32_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetFrmCtrlRes() const [member function]
    cls.add_method('GetFrmCtrlRes',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::TypeId ns3::LrWpanMacHeader::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetKeyIdIndex() const [member function]
    cls.add_method('GetKeyIdIndex',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetKeyIdMode() const [member function]
    cls.add_method('GetKeyIdMode',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacHeader::GetKeyIdSrc32() const [member function]
    cls.add_method('GetKeyIdSrc32',
                   'uint32_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint64_t ns3::LrWpanMacHeader::GetKeyIdSrc64() const [member function]
    cls.add_method('GetKeyIdSrc64',
                   'uint64_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetSecControl() const [member function]
    cls.add_method('GetSecControl',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetSecCtrlReserved() const [member function]
    cls.add_method('GetSecCtrlReserved',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetSecLevel() const [member function]
    cls.add_method('GetSecLevel',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetSeqNum() const [member function]
    cls.add_method('GetSeqNum',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacHeader::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::Mac16Address ns3::LrWpanMacHeader::GetShortDstAddr() const [member function]
    cls.add_method('GetShortDstAddr',
                   'ns3::Mac16Address',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::Mac16Address ns3::LrWpanMacHeader::GetShortSrcAddr() const [member function]
    cls.add_method('GetShortSrcAddr',
                   'ns3::Mac16Address',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint8_t ns3::LrWpanMacHeader::GetSrcAddrMode() const [member function]
    cls.add_method('GetSrcAddrMode',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): uint16_t ns3::LrWpanMacHeader::GetSrcPanId() const [member function]
    cls.add_method('GetSrcPanId',
                   'uint16_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): ns3::LrWpanMacHeader::LrWpanMacType ns3::LrWpanMacHeader::GetType() const [member function]
    cls.add_method('GetType',
                   'ns3::LrWpanMacHeader::LrWpanMacType',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanMacHeader::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsAckReq() const [member function]
    cls.add_method('IsAckReq',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsAcknowledgment() const [member function]
    cls.add_method('IsAcknowledgment',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsBeacon() const [member function]
    cls.add_method('IsBeacon',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsCommand() const [member function]
    cls.add_method('IsCommand',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsData() const [member function]
    cls.add_method('IsData',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsFrmPend() const [member function]
    cls.add_method('IsFrmPend',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsPanIdComp() const [member function]
    cls.add_method('IsPanIdComp',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): bool ns3::LrWpanMacHeader::IsSecEnable() const [member function]
    cls.add_method('IsSecEnable',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetAckReq() [member function]
    cls.add_method('SetAckReq',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetDstAddrFields(uint16_t panId, ns3::Mac16Address addr) [member function]
    cls.add_method('SetDstAddrFields',
                   'void',
                   [param('uint16_t', 'panId'), param('ns3::Mac16Address', 'addr')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetDstAddrFields(uint16_t panId, ns3::Mac64Address addr) [member function]
    cls.add_method('SetDstAddrFields',
                   'void',
                   [param('uint16_t', 'panId'), param('ns3::Mac64Address', 'addr')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetDstAddrMode(uint8_t addrMode) [member function]
    cls.add_method('SetDstAddrMode',
                   'void',
                   [param('uint8_t', 'addrMode')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetFrameControl(uint16_t frameControl) [member function]
    cls.add_method('SetFrameControl',
                   'void',
                   [param('uint16_t', 'frameControl')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetFrameVer(uint8_t ver) [member function]
    cls.add_method('SetFrameVer',
                   'void',
                   [param('uint8_t', 'ver')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetFrmCounter(uint32_t frmCntr) [member function]
    cls.add_method('SetFrmCounter',
                   'void',
                   [param('uint32_t', 'frmCntr')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetFrmCtrlRes(uint8_t res) [member function]
    cls.add_method('SetFrmCtrlRes',
                   'void',
                   [param('uint8_t', 'res')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetFrmPend() [member function]
    cls.add_method('SetFrmPend',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetKeyId(uint8_t keyIndex) [member function]
    cls.add_method('SetKeyId',
                   'void',
                   [param('uint8_t', 'keyIndex')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetKeyId(uint32_t keySrc, uint8_t keyIndex) [member function]
    cls.add_method('SetKeyId',
                   'void',
                   [param('uint32_t', 'keySrc'), param('uint8_t', 'keyIndex')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetKeyId(uint64_t keySrc, uint8_t keyIndex) [member function]
    cls.add_method('SetKeyId',
                   'void',
                   [param('uint64_t', 'keySrc'), param('uint8_t', 'keyIndex')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetKeyIdMode(uint8_t keyIdMode) [member function]
    cls.add_method('SetKeyIdMode',
                   'void',
                   [param('uint8_t', 'keyIdMode')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetNoAckReq() [member function]
    cls.add_method('SetNoAckReq',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetNoFrmPend() [member function]
    cls.add_method('SetNoFrmPend',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetNoPanIdComp() [member function]
    cls.add_method('SetNoPanIdComp',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetPanIdComp() [member function]
    cls.add_method('SetPanIdComp',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSecControl(uint8_t secLevel) [member function]
    cls.add_method('SetSecControl',
                   'void',
                   [param('uint8_t', 'secLevel')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSecCtrlReserved(uint8_t res) [member function]
    cls.add_method('SetSecCtrlReserved',
                   'void',
                   [param('uint8_t', 'res')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSecDisable() [member function]
    cls.add_method('SetSecDisable',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSecEnable() [member function]
    cls.add_method('SetSecEnable',
                   'void',
                   [])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSecLevel(uint8_t secLevel) [member function]
    cls.add_method('SetSecLevel',
                   'void',
                   [param('uint8_t', 'secLevel')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSeqNum(uint8_t seqNum) [member function]
    cls.add_method('SetSeqNum',
                   'void',
                   [param('uint8_t', 'seqNum')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSrcAddrFields(uint16_t panId, ns3::Mac16Address addr) [member function]
    cls.add_method('SetSrcAddrFields',
                   'void',
                   [param('uint16_t', 'panId'), param('ns3::Mac16Address', 'addr')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSrcAddrFields(uint16_t panId, ns3::Mac64Address addr) [member function]
    cls.add_method('SetSrcAddrFields',
                   'void',
                   [param('uint16_t', 'panId'), param('ns3::Mac64Address', 'addr')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetSrcAddrMode(uint8_t addrMode) [member function]
    cls.add_method('SetSrcAddrMode',
                   'void',
                   [param('uint8_t', 'addrMode')])
    ## lr-wpan-mac-header.h (module 'lr-wpan'): void ns3::LrWpanMacHeader::SetType(ns3::LrWpanMacHeader::LrWpanMacType wpanMacType) [member function]
    cls.add_method('SetType',
                   'void',
                   [param('ns3::LrWpanMacHeader::LrWpanMacType', 'wpanMacType')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register bindings for ns3::Object (core module).

    Adds the public lifecycle API (AggregateObject, Dispose, Initialize,
    GetAggregateIterator, GetInstanceTypeId, static GetTypeId) plus the
    protected copy constructor and the protected virtual hooks
    DoDispose/DoInitialize/NotifyNewAggregate.  NOTE(review): appears to be
    pybindgen-generated output — prefer regenerating over hand-editing.
    """
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject',
                   'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator',
                   'ns3::Object::AggregateIterator',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Bind ns3::Object::AggregateIterator (object.h, core module):
    both constructors plus HasNext()/Next().
    """
    iter_type = 'ns3::Object::AggregateIterator'
    # Copy constructor first, then the default constructor (generator order).
    cls.add_constructor([param(iter_type + ' const &', 'arg0')])
    cls.add_constructor([])
    # bool HasNext() const [member function]
    cls.add_method('HasNext', 'bool', [], is_const=True)
    # ns3::Ptr<ns3::Object const> Next() [member function]
    cls.add_method('Next', 'ns3::Ptr< ns3::Object const >', [])
    return
def register_Ns3PcapFileWrapper_methods(root_module, cls):
    """Register bindings for ns3::PcapFileWrapper (network module).

    Covers stream state (Fail/Eof/Clear), Open/Close/Init, the three Write
    overloads (packet, header+packet, raw buffer) and the pcap header-field
    getters.  NOTE(review): appears to be pybindgen-generated output —
    prefer regenerating over hand-editing.
    """
    ## pcap-file-wrapper.h (module 'network'): static ns3::TypeId ns3::PcapFileWrapper::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## pcap-file-wrapper.h (module 'network'): ns3::PcapFileWrapper::PcapFileWrapper() [constructor]
    cls.add_constructor([])
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Fail() const [member function]
    cls.add_method('Fail',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file-wrapper.h (module 'network'): bool ns3::PcapFileWrapper::Eof() const [member function]
    cls.add_method('Eof',
                   'bool',
                   [],
                   is_const=True)
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Clear() [member function]
    cls.add_method('Clear',
                   'void',
                   [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Open(std::string const & filename, std::_Ios_Openmode mode) [member function]
    cls.add_method('Open',
                   'void',
                   [param('std::string const &', 'filename'), param('std::_Ios_Openmode', 'mode')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Close() [member function]
    cls.add_method('Close',
                   'void',
                   [])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Init(uint32_t dataLinkType, uint32_t snapLen=std::numeric_limits<unsigned int>::max(), int32_t tzCorrection=ns3::PcapFile::ZONE_DEFAULT) [member function]
    cls.add_method('Init',
                   'void',
                   [param('uint32_t', 'dataLinkType'), param('uint32_t', 'snapLen', default_value='std::numeric_limits<unsigned int>::max()'), param('int32_t', 'tzCorrection', default_value='ns3::PcapFile::ZONE_DEFAULT')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Time', 't'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, ns3::Header const & header, ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Time', 't'), param('ns3::Header const &', 'header'), param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## pcap-file-wrapper.h (module 'network'): void ns3::PcapFileWrapper::Write(ns3::Time t, uint8_t const * buffer, uint32_t length) [member function]
    cls.add_method('Write',
                   'void',
                   [param('ns3::Time', 't'), param('uint8_t const *', 'buffer'), param('uint32_t', 'length')])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetMagic() [member function]
    cls.add_method('GetMagic',
                   'uint32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMajor() [member function]
    cls.add_method('GetVersionMajor',
                   'uint16_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint16_t ns3::PcapFileWrapper::GetVersionMinor() [member function]
    cls.add_method('GetVersionMinor',
                   'uint16_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): int32_t ns3::PcapFileWrapper::GetTimeZoneOffset() [member function]
    cls.add_method('GetTimeZoneOffset',
                   'int32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSigFigs() [member function]
    cls.add_method('GetSigFigs',
                   'uint32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetSnapLen() [member function]
    cls.add_method('GetSnapLen',
                   'uint32_t',
                   [])
    ## pcap-file-wrapper.h (module 'network'): uint32_t ns3::PcapFileWrapper::GetDataLinkType() [member function]
    cls.add_method('GetDataLinkType',
                   'uint32_t',
                   [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Bind SimpleRefCount<ns3::AttributeAccessor, ...> (simple-ref-count.h,
    core module): default/copy constructors plus static Cleanup().
    """
    refcount = 'ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(refcount + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)  # static void Cleanup()
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Bind SimpleRefCount<ns3::AttributeChecker, ...> (simple-ref-count.h,
    core module): default/copy constructors plus static Cleanup().
    """
    refcount = 'ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(refcount + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)  # static void Cleanup()
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Bind SimpleRefCount<ns3::AttributeValue, ...> (simple-ref-count.h,
    core module): default/copy constructors plus static Cleanup().
    """
    refcount = 'ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(refcount + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)  # static void Cleanup()
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Bind SimpleRefCount<ns3::CallbackImplBase, ...> (simple-ref-count.h,
    core module): default/copy constructors plus static Cleanup().
    """
    refcount = 'ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(refcount + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)  # static void Cleanup()
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Bind SimpleRefCount<ns3::EventImpl, ...> (simple-ref-count.h,
    core module): default/copy constructors plus static Cleanup().
    """
    refcount = 'ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(refcount + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)  # static void Cleanup()
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Bind SimpleRefCount<ns3::Hash::Implementation, ...>
    (simple-ref-count.h, core module): default/copy constructors plus
    static Cleanup().
    """
    refcount = 'ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > >'
    cls.add_constructor([])  # default constructor
    cls.add_constructor([param(refcount + ' const &', 'o')])  # copy constructor
    cls.add_method('Cleanup', 'void', [], is_static=True)  # static void Cleanup()
    return
def register_Ns3SimpleRefCount__Ns3LrWpanInterferenceHelper_Ns3Empty_Ns3DefaultDeleter__lt__ns3LrWpanInterferenceHelper__gt___methods(root_module, cls):
    """Bind the ns3::SimpleRefCount<ns3::LrWpanInterferenceHelper> specialization (simple-ref-count.h, module 'core').

    Registers the default constructor, the copy constructor and the static
    Cleanup() member function on *cls*.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::LrWpanInterferenceHelper, ns3::empty, ns3::DefaultDeleter< ns3::LrWpanInterferenceHelper > > const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
    """Bind the ns3::SimpleRefCount<ns3::NixVector> specialization (simple-ref-count.h, module 'core').

    Registers the default constructor, the copy constructor and the static
    Cleanup() member function on *cls*.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
    """Bind the ns3::SimpleRefCount<ns3::OutputStreamWrapper> specialization (simple-ref-count.h, module 'core').

    Registers the default constructor, the copy constructor and the static
    Cleanup() member function on *cls*.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
    """Bind the ns3::SimpleRefCount<ns3::Packet> specialization (simple-ref-count.h, module 'core').

    Registers the default constructor, the copy constructor and the static
    Cleanup() member function on *cls*.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3SpectrumSignalParameters_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumSignalParameters__gt___methods(root_module, cls):
    """Bind the ns3::SimpleRefCount<ns3::SpectrumSignalParameters> specialization (simple-ref-count.h, module 'core').

    Registers the default constructor, the copy constructor and the static
    Cleanup() member function on *cls*.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::SpectrumSignalParameters, ns3::empty, ns3::DefaultDeleter< ns3::SpectrumSignalParameters > > const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Bind the ns3::SimpleRefCount<ns3::TraceSourceAccessor> specialization (simple-ref-count.h, module 'core').

    Registers the default constructor, the copy constructor and the static
    Cleanup() member function on *cls*.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    cls.add_method('Cleanup', 'void', [], is_static=True)
    return
def register_Ns3SpectrumPhy_methods(root_module, cls):
    """Bind the abstract ns3::SpectrumPhy base class (spectrum-phy.h, module 'spectrum').

    Registers the default constructor, the static GetTypeId(), and the
    pure-virtual device/mobility/channel accessors plus StartRx().
    """
    cls.add_constructor([])
    # static ns3::TypeId GetTypeId()
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Pure-virtual setters/getters for the attached NetDevice and MobilityModel.
    cls.add_method('SetDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'd')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('SetMobility', 'void', [param('ns3::Ptr< ns3::MobilityModel >', 'm')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetMobility', 'ns3::Ptr< ns3::MobilityModel >', [], is_pure_virtual=True, is_virtual=True)
    # Pure-virtual channel / RX-model / antenna accessors.
    cls.add_method('SetChannel', 'void', [param('ns3::Ptr< ns3::SpectrumChannel >', 'c')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetRxSpectrumModel', 'ns3::Ptr< ns3::SpectrumModel const >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetRxAntenna', 'ns3::Ptr< ns3::AntennaModel >', [], is_pure_virtual=True, is_virtual=True)
    # Pure-virtual reception entry point.
    cls.add_method('StartRx', 'void', [param('ns3::Ptr< ns3::SpectrumSignalParameters >', 'params')], is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3SpectrumSignalParameters_methods(root_module, cls):
    """Bind ns3::SpectrumSignalParameters (spectrum-signal-parameters.h, module 'spectrum').

    Registers the constructors, the virtual Copy() method and the public
    instance attributes (duration, psd, txAntenna, txPhy).
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::SpectrumSignalParameters const &', 'p')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::SpectrumSignalParameters >', [], is_virtual=True)
    # Public data members exposed as mutable Python attributes.
    cls.add_instance_attribute('duration', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('psd', 'ns3::Ptr< ns3::SpectrumValue >', is_const=False)
    cls.add_instance_attribute('txAntenna', 'ns3::Ptr< ns3::AntennaModel >', is_const=False)
    cls.add_instance_attribute('txPhy', 'ns3::Ptr< ns3::SpectrumPhy >', is_const=False)
    return
def register_Ns3Time_methods(root_module, cls):
    """Bind ns3::Time (nstime.h, module 'core').

    Registers arithmetic and comparison operators, the numeric/string
    constructors, and the unit-conversion and predicate member functions.
    Registration order matches the original pybindgen scan.
    """
    # Binary arithmetic: Time * int64, Time + Time, Time - Time, Time / int64.
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
    # Comparisons, in-place arithmetic and stream output.
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('!=')
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('>=')
    # Constructors: default, copy, every integral width, int64x64_t and string.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time const &', 'o')])
    cls.add_constructor([param('double', 'v')])
    cls.add_constructor([param('int', 'v')])
    cls.add_constructor([param('long int', 'v')])
    cls.add_constructor([param('long long int', 'v')])
    cls.add_constructor([param('unsigned int', 'v')])
    cls.add_constructor([param('long unsigned int', 'v')])
    cls.add_constructor([param('long long unsigned int', 'v')])
    cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
    cls.add_constructor([param('std::string const &', 's')])
    # Formatting and comparison helpers.
    cls.add_method('As', 'ns3::TimeWithUnit', [param('ns3::Time::Unit const', 'unit')], is_const=True)
    cls.add_method('Compare', 'int', [param('ns3::Time const &', 'o')], is_const=True)
    # Static factory functions from int64x64_t / double / integer values.
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value')], is_static=True)
    cls.add_method('From', 'ns3::Time', [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    cls.add_method('FromDouble', 'ns3::Time', [param('double', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    cls.add_method('FromInteger', 'ns3::Time', [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')], is_static=True)
    # Unit-conversion getters.
    cls.add_method('GetDays', 'double', [], is_const=True)
    cls.add_method('GetDouble', 'double', [], is_const=True)
    cls.add_method('GetFemtoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetHours', 'double', [], is_const=True)
    cls.add_method('GetInteger', 'int64_t', [], is_const=True)
    cls.add_method('GetMicroSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetMilliSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetMinutes', 'double', [], is_const=True)
    cls.add_method('GetNanoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetPicoSeconds', 'int64_t', [], is_const=True)
    cls.add_method('GetResolution', 'ns3::Time::Unit', [], is_static=True)
    cls.add_method('GetSeconds', 'double', [], is_const=True)
    cls.add_method('GetTimeStep', 'int64_t', [], is_const=True)
    cls.add_method('GetYears', 'double', [], is_const=True)
    # Sign / zero predicates.
    cls.add_method('IsNegative', 'bool', [], is_const=True)
    cls.add_method('IsPositive', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyNegative', 'bool', [], is_const=True)
    cls.add_method('IsStrictlyPositive', 'bool', [], is_const=True)
    cls.add_method('IsZero', 'bool', [], is_const=True)
    # Static extrema, resolution control and conversion to other units.
    cls.add_method('Max', 'ns3::Time', [], is_static=True)
    cls.add_method('Min', 'ns3::Time', [], is_static=True)
    cls.add_method('SetResolution', 'void', [param('ns3::Time::Unit', 'resolution')], is_static=True)
    cls.add_method('StaticInit', 'bool', [], is_static=True)
    cls.add_method('To', 'ns3::int64x64_t', [param('ns3::Time::Unit', 'unit')], is_const=True)
    cls.add_method('ToDouble', 'double', [param('ns3::Time::Unit', 'unit')], is_const=True)
    cls.add_method('ToInteger', 'int64_t', [param('ns3::Time::Unit', 'unit')], is_const=True)
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Bind the abstract ns3::TraceSourceAccessor (trace-source-accessor.h, module 'core').

    Registers both constructors and the four pure-virtual connect/disconnect
    operations (with and without context).
    """
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Connect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('ConnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Disconnect', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('DisconnectWithoutContext', 'bool', [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3Trailer_methods(root_module, cls):
    """Bind the abstract ns3::Trailer (trailer.h, module 'network').

    Registers the stream operator, both constructors, the static GetTypeId()
    and the pure-virtual serialization interface.
    """
    cls.add_output_stream_operator()
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Trailer const &', 'arg0')])
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'end')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Bind the abstract ns3::AttributeAccessor (attribute.h, module 'core').

    Registers both constructors and the pure-virtual Get/Set accessors and
    HasGetter/HasSetter capability queries.
    """
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Get', 'bool', [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasGetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasSetter', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Set', 'bool', [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Bind the abstract ns3::AttributeChecker (attribute.h, module 'core').

    Registers both constructors, the pure-virtual Check/Copy/Create and type
    introspection methods, and the concrete CreateValidValue() helper.
    """
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Check', 'bool', [param('ns3::AttributeValue const &', 'value')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Copy', 'bool', [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    # CreateValidValue is the only non-pure member here.
    cls.add_method('CreateValidValue', 'ns3::Ptr< ns3::AttributeValue >', [param('ns3::AttributeValue const &', 'value')], is_const=True)
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Bind the abstract ns3::AttributeValue (attribute.h, module 'core').

    Registers both constructors and the pure-virtual Copy /
    DeserializeFromString / SerializeToString interface.
    """
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3BooleanChecker_methods(root_module, cls):
    """Bind ns3::BooleanChecker (boolean.h, module 'core'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
    return
def register_Ns3BooleanValue_methods(root_module, cls):
    """Bind ns3::BooleanValue (boolean.h, module 'core').

    Registers the stream operator, the constructors (copy, default, bool),
    the virtual Copy/serialization overrides and the Get/Set accessors.
    """
    cls.add_output_stream_operator()
    cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('bool', 'value')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'bool', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('bool', 'value')])
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Bind ns3::CallbackChecker (callback.h, module 'core'): default and copy constructors only."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Bind ns3::CallbackImplBase (callback.h, module 'core').

    Registers both constructors, the pure-virtual GetTypeid/IsEqual
    interface, and the protected static Demangle() helper.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    cls.add_method('GetTypeid', 'std::string', [], is_pure_virtual=True, is_const=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool', [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')], is_pure_virtual=True, is_const=True, is_virtual=True)
    # Demangle is static and protected in C++, mirrored here via visibility='protected'.
    cls.add_method('Demangle', 'std::string', [param('std::string const &', 'mangled')], is_static=True, visibility='protected')
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Bind ns3::CallbackValue (callback.h, module 'core').

    Registers the constructors (copy, default, from CallbackBase), the
    virtual Copy/serialization overrides and the Set() mutator.
    """
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3DoubleValue_methods(root_module, cls):
    """Bind ns3::DoubleValue (double.h, module 'core').

    Registers the constructors (default, copy, from double), the virtual
    Copy/serialization overrides and the Get/Set accessors.
    """
    cls.add_constructor([])
    cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
    cls.add_constructor([param('double const &', 'value')])
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'double', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('double const &', 'value')])
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register bindings for ns3::EmptyAttributeValue (attribute.h, module 'core').

    Binds the copy and default constructors; the virtual AttributeValue
    methods (Copy, DeserializeFromString, SerializeToString) are registered
    with private visibility, matching the C++ class declaration.
    """
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   visibility='private', is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, visibility='private', is_virtual=True)
    return
def register_Ns3EnumChecker_methods(root_module, cls):
    """Register bindings for ns3::EnumChecker (enum.h, module 'core').

    Binds the constructors, the Add/AddDefault value-name registration
    helpers, and the virtual AttributeChecker interface (Check, Copy,
    Create, GetUnderlyingTypeInformation, GetValueTypeName,
    HasUnderlyingTypeInformation).
    """
    ## enum.h (module 'core'): ns3::EnumChecker::EnumChecker(ns3::EnumChecker const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
    ## enum.h (module 'core'): ns3::EnumChecker::EnumChecker() [constructor]
    cls.add_constructor([])
    ## enum.h (module 'core'): void ns3::EnumChecker::Add(int value, std::string name) [member function]
    cls.add_method('Add',
                   'void',
                   [param('int', 'value'), param('std::string', 'name')])
    ## enum.h (module 'core'): void ns3::EnumChecker::AddDefault(int value, std::string name) [member function]
    cls.add_method('AddDefault',
                   'void',
                   [param('int', 'value'), param('std::string', 'name')])
    ## enum.h (module 'core'): bool ns3::EnumChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    ## enum.h (module 'core'): bool ns3::EnumChecker::Copy(ns3::AttributeValue const & src, ns3::AttributeValue & dst) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')],
                   is_const=True, is_virtual=True)
    ## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## enum.h (module 'core'): std::string ns3::EnumChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## enum.h (module 'core'): std::string ns3::EnumChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## enum.h (module 'core'): bool ns3::EnumChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EnumValue_methods(root_module, cls):
    """Register bindings for ns3::EnumValue (enum.h, module 'core').

    Binds the default, copy, and from-int constructors, the virtual
    AttributeValue interface (Copy, SerializeToString, DeserializeFromString),
    and the Get/Set accessors for the wrapped int.
    """
    ## enum.h (module 'core'): ns3::EnumValue::EnumValue(ns3::EnumValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
    ## enum.h (module 'core'): ns3::EnumValue::EnumValue() [constructor]
    cls.add_constructor([])
    ## enum.h (module 'core'): ns3::EnumValue::EnumValue(int value) [constructor]
    cls.add_constructor([param('int', 'value')])
    ## enum.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EnumValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## enum.h (module 'core'): bool ns3::EnumValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## enum.h (module 'core'): int ns3::EnumValue::Get() const [member function]
    cls.add_method('Get',
                   'int',
                   [],
                   is_const=True)
    ## enum.h (module 'core'): std::string ns3::EnumValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## enum.h (module 'core'): void ns3::EnumValue::Set(int value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('int', 'value')])
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register bindings for ns3::EventImpl (event-impl.h, module 'core').

    Binds the constructors and the Cancel/Invoke/IsCancelled public API;
    Notify is registered as a pure-virtual protected method, matching the
    C++ declaration.
    """
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl(ns3::EventImpl const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    ## event-impl.h (module 'core'): ns3::EventImpl::EventImpl() [constructor]
    cls.add_constructor([])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Cancel() [member function]
    cls.add_method('Cancel',
                   'void',
                   [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Invoke() [member function]
    cls.add_method('Invoke',
                   'void',
                   [])
    ## event-impl.h (module 'core'): bool ns3::EventImpl::IsCancelled() [member function]
    cls.add_method('IsCancelled',
                   'bool',
                   [])
    ## event-impl.h (module 'core'): void ns3::EventImpl::Notify() [member function]
    cls.add_method('Notify',
                   'void',
                   [],
                   is_pure_virtual=True, visibility='protected', is_virtual=True)
    return
def register_Ns3IntegerValue_methods(root_module, cls):
    """Register bindings for ns3::IntegerValue (integer.h, module 'core').

    Binds the default, copy, and from-int64_t constructors, the virtual
    AttributeValue interface (Copy, SerializeToString, DeserializeFromString),
    and the Get/Set accessors for the wrapped int64_t.
    """
    ## integer.h (module 'core'): ns3::IntegerValue::IntegerValue() [constructor]
    cls.add_constructor([])
    ## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(ns3::IntegerValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
    ## integer.h (module 'core'): ns3::IntegerValue::IntegerValue(int64_t const & value) [constructor]
    cls.add_constructor([param('int64_t const &', 'value')])
    ## integer.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::IntegerValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## integer.h (module 'core'): bool ns3::IntegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## integer.h (module 'core'): int64_t ns3::IntegerValue::Get() const [member function]
    cls.add_method('Get',
                   'int64_t',
                   [],
                   is_const=True)
    ## integer.h (module 'core'): std::string ns3::IntegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## integer.h (module 'core'): void ns3::IntegerValue::Set(int64_t const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('int64_t const &', 'value')])
    return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
    """Bind ns3::Ipv4AddressChecker (ipv4-address.h, module 'network').

    Only the default constructor and the copy constructor
    Ipv4AddressChecker(Ipv4AddressChecker const & arg0) are exposed.
    """
    for ctor_args in ([], [param('ns3::Ipv4AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv4AddressValue (ipv4-address.h, module 'network').

    Binds the default, copy, and from-Ipv4Address constructors, the virtual
    AttributeValue interface (Copy, SerializeToString, DeserializeFromString),
    and the Get/Set accessors for the wrapped Ipv4Address.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv4Address',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv4Address const &', 'value')])
    return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
    """Bind ns3::Ipv4MaskChecker (ipv4-address.h, module 'network').

    Only the default constructor and the copy constructor
    Ipv4MaskChecker(Ipv4MaskChecker const & arg0) are exposed.
    """
    for ctor_args in ([], [param('ns3::Ipv4MaskChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv4MaskValue (ipv4-address.h, module 'network').

    Binds the default, copy, and from-Ipv4Mask constructors, the virtual
    AttributeValue interface (Copy, SerializeToString, DeserializeFromString),
    and the Get/Set accessors for the wrapped Ipv4Mask.
    """
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
    cls.add_constructor([])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
    ## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
    ## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv4Mask',
                   [],
                   is_const=True)
    ## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv4Mask const &', 'value')])
    return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
    """Bind ns3::Ipv6AddressChecker (ipv6-address.h, module 'network').

    Only the default constructor and the copy constructor
    Ipv6AddressChecker(Ipv6AddressChecker const & arg0) are exposed.
    """
    for ctor_args in ([], [param('ns3::Ipv6AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv6AddressValue (ipv6-address.h, module 'network').

    Binds the default, copy, and from-Ipv6Address constructors, the virtual
    AttributeValue interface (Copy, SerializeToString, DeserializeFromString),
    and the Get/Set accessors for the wrapped Ipv6Address.
    """
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv6Address',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv6Address const &', 'value')])
    return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
    """Bind ns3::Ipv6PrefixChecker (ipv6-address.h, module 'network').

    Only the default constructor and the copy constructor
    Ipv6PrefixChecker(Ipv6PrefixChecker const & arg0) are exposed.
    """
    for ctor_args in ([], [param('ns3::Ipv6PrefixChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
    """Register bindings for ns3::Ipv6PrefixValue (ipv6-address.h, module 'network').

    Binds the default, copy, and from-Ipv6Prefix constructors, the virtual
    AttributeValue interface (Copy, SerializeToString, DeserializeFromString),
    and the Get/Set accessors for the wrapped Ipv6Prefix.
    """
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
    cls.add_constructor([])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
    ## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
    cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
    ## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
    cls.add_method('Get',
                   'ns3::Ipv6Prefix',
                   [],
                   is_const=True)
    ## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('ns3::Ipv6Prefix const &', 'value')])
    return
def register_Ns3LrWpanCsmaCa_methods(root_module, cls):
    """Register bindings for ns3::LrWpanCsmaCa (lr-wpan-csmaca.h, module 'lr-wpan').

    Binds GetTypeId, the default constructor, the MAC association accessors
    (SetMac/GetMac), slotted/unslotted mode switches and queries, the
    macMinBE/macMaxBE/macMaxCSMABackoffs/unitBackoffPeriod accessor pairs,
    the CSMA-CA state machine entry points (Start, Cancel,
    RandomBackoffDelay, CanProceed, RequestCCA, PlmeCcaConfirm), the
    state-change callback setter, AssignStreams, GetNB, and a private
    virtual DoDispose override.
    """
    ## lr-wpan-csmaca.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanCsmaCa::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## lr-wpan-csmaca.h (module 'lr-wpan'): ns3::LrWpanCsmaCa::LrWpanCsmaCa() [constructor]
    cls.add_constructor([])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::SetMac(ns3::Ptr<ns3::LrWpanMac> mac) [member function]
    cls.add_method('SetMac',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanMac >', 'mac')])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanMac> ns3::LrWpanCsmaCa::GetMac() const [member function]
    cls.add_method('GetMac',
                   'ns3::Ptr< ns3::LrWpanMac >',
                   [],
                   is_const=True)
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::SetSlottedCsmaCa() [member function]
    cls.add_method('SetSlottedCsmaCa',
                   'void',
                   [])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::SetUnSlottedCsmaCa() [member function]
    cls.add_method('SetUnSlottedCsmaCa',
                   'void',
                   [])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): bool ns3::LrWpanCsmaCa::IsSlottedCsmaCa() const [member function]
    cls.add_method('IsSlottedCsmaCa',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-csmaca.h (module 'lr-wpan'): bool ns3::LrWpanCsmaCa::IsUnSlottedCsmaCa() const [member function]
    cls.add_method('IsUnSlottedCsmaCa',
                   'bool',
                   [],
                   is_const=True)
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::SetMacMinBE(uint8_t macMinBE) [member function]
    cls.add_method('SetMacMinBE',
                   'void',
                   [param('uint8_t', 'macMinBE')])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): uint8_t ns3::LrWpanCsmaCa::GetMacMinBE() const [member function]
    cls.add_method('GetMacMinBE',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::SetMacMaxBE(uint8_t macMaxBE) [member function]
    cls.add_method('SetMacMaxBE',
                   'void',
                   [param('uint8_t', 'macMaxBE')])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): uint8_t ns3::LrWpanCsmaCa::GetMacMaxBE() const [member function]
    cls.add_method('GetMacMaxBE',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::SetMacMaxCSMABackoffs(uint8_t macMaxCSMABackoffs) [member function]
    cls.add_method('SetMacMaxCSMABackoffs',
                   'void',
                   [param('uint8_t', 'macMaxCSMABackoffs')])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): uint8_t ns3::LrWpanCsmaCa::GetMacMaxCSMABackoffs() const [member function]
    cls.add_method('GetMacMaxCSMABackoffs',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::SetUnitBackoffPeriod(uint64_t unitBackoffPeriod) [member function]
    cls.add_method('SetUnitBackoffPeriod',
                   'void',
                   [param('uint64_t', 'unitBackoffPeriod')])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): uint64_t ns3::LrWpanCsmaCa::GetUnitBackoffPeriod() const [member function]
    cls.add_method('GetUnitBackoffPeriod',
                   'uint64_t',
                   [],
                   is_const=True)
    ## lr-wpan-csmaca.h (module 'lr-wpan'): ns3::Time ns3::LrWpanCsmaCa::GetTimeToNextSlot() const [member function]
    cls.add_method('GetTimeToNextSlot',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::Start() [member function]
    cls.add_method('Start',
                   'void',
                   [])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::Cancel() [member function]
    cls.add_method('Cancel',
                   'void',
                   [])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::RandomBackoffDelay() [member function]
    cls.add_method('RandomBackoffDelay',
                   'void',
                   [])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::CanProceed() [member function]
    cls.add_method('CanProceed',
                   'void',
                   [])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::RequestCCA() [member function]
    cls.add_method('RequestCCA',
                   'void',
                   [])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::PlmeCcaConfirm(ns3::LrWpanPhyEnumeration status) [member function]
    cls.add_method('PlmeCcaConfirm',
                   'void',
                   [param('ns3::LrWpanPhyEnumeration', 'status')])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::SetLrWpanMacStateCallback(ns3::LrWpanMacStateCallback macState) [member function]
    cls.add_method('SetLrWpanMacStateCallback',
                   'void',
                   [param('ns3::LrWpanMacStateCallback', 'macState')])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): int64_t ns3::LrWpanCsmaCa::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): uint8_t ns3::LrWpanCsmaCa::GetNB() [member function]
    cls.add_method('GetNB',
                   'uint8_t',
                   [])
    ## lr-wpan-csmaca.h (module 'lr-wpan'): void ns3::LrWpanCsmaCa::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3LrWpanErrorModel_methods(root_module, cls):
    """Register bindings for ns3::LrWpanErrorModel (lr-wpan-error-model.h, module 'lr-wpan').

    Binds the copy and default constructors, the const GetChunkSuccessRate
    helper (snr, nbits), and the static GetTypeId function.
    """
    ## lr-wpan-error-model.h (module 'lr-wpan'): ns3::LrWpanErrorModel::LrWpanErrorModel(ns3::LrWpanErrorModel const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanErrorModel const &', 'arg0')])
    ## lr-wpan-error-model.h (module 'lr-wpan'): ns3::LrWpanErrorModel::LrWpanErrorModel() [constructor]
    cls.add_constructor([])
    ## lr-wpan-error-model.h (module 'lr-wpan'): double ns3::LrWpanErrorModel::GetChunkSuccessRate(double snr, uint32_t nbits) const [member function]
    cls.add_method('GetChunkSuccessRate',
                   'double',
                   [param('double', 'snr'), param('uint32_t', 'nbits')],
                   is_const=True)
    ## lr-wpan-error-model.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanErrorModel::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    return
def register_Ns3LrWpanInterferenceHelper_methods(root_module, cls):
    """Register bindings for ns3::LrWpanInterferenceHelper (lr-wpan-interference-helper.h, module 'lr-wpan').

    Binds the spectrum-model constructor, the AddSignal/RemoveSignal/
    ClearSignals signal-tracking methods, and the const GetSignalPsd and
    GetSpectrumModel accessors.
    """
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): ns3::LrWpanInterferenceHelper::LrWpanInterferenceHelper(ns3::Ptr<ns3::SpectrumModel const> spectrumModel) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::SpectrumModel const >', 'spectrumModel')])
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): bool ns3::LrWpanInterferenceHelper::AddSignal(ns3::Ptr<ns3::SpectrumValue const> signal) [member function]
    cls.add_method('AddSignal',
                   'bool',
                   [param('ns3::Ptr< ns3::SpectrumValue const >', 'signal')])
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): bool ns3::LrWpanInterferenceHelper::RemoveSignal(ns3::Ptr<ns3::SpectrumValue const> signal) [member function]
    cls.add_method('RemoveSignal',
                   'bool',
                   [param('ns3::Ptr< ns3::SpectrumValue const >', 'signal')])
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): void ns3::LrWpanInterferenceHelper::ClearSignals() [member function]
    cls.add_method('ClearSignals',
                   'void',
                   [])
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumValue> ns3::LrWpanInterferenceHelper::GetSignalPsd() const [member function]
    cls.add_method('GetSignalPsd',
                   'ns3::Ptr< ns3::SpectrumValue >',
                   [],
                   is_const=True)
    ## lr-wpan-interference-helper.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumModel const> ns3::LrWpanInterferenceHelper::GetSpectrumModel() const [member function]
    cls.add_method('GetSpectrumModel',
                   'ns3::Ptr< ns3::SpectrumModel const >',
                   [],
                   is_const=True)
    return
def register_Ns3LrWpanMac_methods(root_module, cls):
    """Register bindings for ns3::LrWpanMac (lr-wpan-mac.h, module 'lr-wpan').

    Binds the constructors, the address/PAN/PHY/CSMA-CA accessors, the
    MCPS data request entry point, the PD-DATA and PLME confirm/indication
    callbacks invoked by the PHY, the MCPS callback setters, one static
    const attribute (aMinMPDUOverhead) plus the public MAC PIB instance
    attributes (m_*), and the protected virtual DoDispose/DoInitialize
    overrides.
    """
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::LrWpanMac(ns3::LrWpanMac const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanMac const &', 'arg0')])
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::LrWpanMac() [constructor]
    cls.add_constructor([])
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanAssociationStatus ns3::LrWpanMac::GetAssociationStatus() const [member function]
    cls.add_method('GetAssociationStatus',
                   'ns3::LrWpanAssociationStatus',
                   [],
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::Mac64Address ns3::LrWpanMac::GetExtendedAddress() const [member function]
    cls.add_method('GetExtendedAddress',
                   'ns3::Mac64Address',
                   [],
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): uint64_t ns3::LrWpanMac::GetMacAckWaitDuration() const [member function]
    cls.add_method('GetMacAckWaitDuration',
                   'uint64_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): uint8_t ns3::LrWpanMac::GetMacMaxFrameRetries() const [member function]
    cls.add_method('GetMacMaxFrameRetries',
                   'uint8_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): uint16_t ns3::LrWpanMac::GetPanId() const [member function]
    cls.add_method('GetPanId',
                   'uint16_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanPhy> ns3::LrWpanMac::GetPhy() [member function]
    cls.add_method('GetPhy',
                   'ns3::Ptr< ns3::LrWpanPhy >',
                   [])
    ## lr-wpan-mac.h (module 'lr-wpan'): bool ns3::LrWpanMac::GetRxOnWhenIdle() [member function]
    cls.add_method('GetRxOnWhenIdle',
                   'bool',
                   [])
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::Mac16Address ns3::LrWpanMac::GetShortAddress() const [member function]
    cls.add_method('GetShortAddress',
                   'ns3::Mac16Address',
                   [],
                   is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanMac::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::McpsDataRequest(ns3::McpsDataRequestParams params, ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('McpsDataRequest',
                   'void',
                   [param('ns3::McpsDataRequestParams', 'params'), param('ns3::Ptr< ns3::Packet >', 'p')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PdDataConfirm(ns3::LrWpanPhyEnumeration status) [member function]
    cls.add_method('PdDataConfirm',
                   'void',
                   [param('ns3::LrWpanPhyEnumeration', 'status')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PdDataIndication(uint32_t psduLength, ns3::Ptr<ns3::Packet> p, uint8_t lqi) [member function]
    cls.add_method('PdDataIndication',
                   'void',
                   [param('uint32_t', 'psduLength'), param('ns3::Ptr< ns3::Packet >', 'p'), param('uint8_t', 'lqi')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PlmeCcaConfirm(ns3::LrWpanPhyEnumeration status) [member function]
    cls.add_method('PlmeCcaConfirm',
                   'void',
                   [param('ns3::LrWpanPhyEnumeration', 'status')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PlmeEdConfirm(ns3::LrWpanPhyEnumeration status, uint8_t energyLevel) [member function]
    cls.add_method('PlmeEdConfirm',
                   'void',
                   [param('ns3::LrWpanPhyEnumeration', 'status'), param('uint8_t', 'energyLevel')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PlmeGetAttributeConfirm(ns3::LrWpanPhyEnumeration status, ns3::LrWpanPibAttributeIdentifier id, ns3::LrWpanPhyPibAttributes * attribute) [member function]
    cls.add_method('PlmeGetAttributeConfirm',
                   'void',
                   [param('ns3::LrWpanPhyEnumeration', 'status'), param('ns3::LrWpanPibAttributeIdentifier', 'id'), param('ns3::LrWpanPhyPibAttributes *', 'attribute')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PlmeSetAttributeConfirm(ns3::LrWpanPhyEnumeration status, ns3::LrWpanPibAttributeIdentifier id) [member function]
    cls.add_method('PlmeSetAttributeConfirm',
                   'void',
                   [param('ns3::LrWpanPhyEnumeration', 'status'), param('ns3::LrWpanPibAttributeIdentifier', 'id')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::PlmeSetTRXStateConfirm(ns3::LrWpanPhyEnumeration status) [member function]
    cls.add_method('PlmeSetTRXStateConfirm',
                   'void',
                   [param('ns3::LrWpanPhyEnumeration', 'status')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetAssociationStatus(ns3::LrWpanAssociationStatus status) [member function]
    cls.add_method('SetAssociationStatus',
                   'void',
                   [param('ns3::LrWpanAssociationStatus', 'status')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetCsmaCa(ns3::Ptr<ns3::LrWpanCsmaCa> csmaCa) [member function]
    cls.add_method('SetCsmaCa',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanCsmaCa >', 'csmaCa')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetExtendedAddress(ns3::Mac64Address address) [member function]
    cls.add_method('SetExtendedAddress',
                   'void',
                   [param('ns3::Mac64Address', 'address')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetLrWpanMacState(ns3::LrWpanMacState macState) [member function]
    cls.add_method('SetLrWpanMacState',
                   'void',
                   [param('ns3::LrWpanMacState', 'macState')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetMacMaxFrameRetries(uint8_t retries) [member function]
    cls.add_method('SetMacMaxFrameRetries',
                   'void',
                   [param('uint8_t', 'retries')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetMcpsDataConfirmCallback(ns3::McpsDataConfirmCallback c) [member function]
    cls.add_method('SetMcpsDataConfirmCallback',
                   'void',
                   [param('ns3::McpsDataConfirmCallback', 'c')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetMcpsDataIndicationCallback(ns3::McpsDataIndicationCallback c) [member function]
    cls.add_method('SetMcpsDataIndicationCallback',
                   'void',
                   [param('ns3::McpsDataIndicationCallback', 'c')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetPanId(uint16_t panId) [member function]
    cls.add_method('SetPanId',
                   'void',
                   [param('uint16_t', 'panId')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetPhy(ns3::Ptr<ns3::LrWpanPhy> phy) [member function]
    cls.add_method('SetPhy',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanPhy >', 'phy')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetRxOnWhenIdle(bool rxOnWhenIdle) [member function]
    cls.add_method('SetRxOnWhenIdle',
                   'void',
                   [param('bool', 'rxOnWhenIdle')])
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::SetShortAddress(ns3::Mac16Address address) [member function]
    cls.add_method('SetShortAddress',
                   'void',
                   [param('ns3::Mac16Address', 'address')])
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::aMinMPDUOverhead [variable]
    cls.add_static_attribute('aMinMPDUOverhead', 'uint32_t const', is_const=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_aBaseSlotDuration [variable]
    cls.add_instance_attribute('m_aBaseSlotDuration', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_aBaseSuperframeDuration [variable]
    cls.add_instance_attribute('m_aBaseSuperframeDuration', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_aNumSuperframeSlots [variable]
    cls.add_instance_attribute('m_aNumSuperframeSlots', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macBeaconOrder [variable]
    cls.add_instance_attribute('m_macBeaconOrder', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macBeaconTxTime [variable]
    cls.add_instance_attribute('m_macBeaconTxTime', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macDsn [variable]
    cls.add_instance_attribute('m_macDsn', 'ns3::SequenceNumber8', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macMaxFrameRetries [variable]
    cls.add_instance_attribute('m_macMaxFrameRetries', 'uint8_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macPanId [variable]
    cls.add_instance_attribute('m_macPanId', 'uint16_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macPromiscuousMode [variable]
    cls.add_instance_attribute('m_macPromiscuousMode', 'bool', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macRxOnWhenIdle [variable]
    cls.add_instance_attribute('m_macRxOnWhenIdle', 'bool', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macSuperframeOrder [variable]
    cls.add_instance_attribute('m_macSuperframeOrder', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): ns3::LrWpanMac::m_macSyncSymbolOffset [variable]
    cls.add_instance_attribute('m_macSyncSymbolOffset', 'uint64_t', is_const=False)
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## lr-wpan-mac.h (module 'lr-wpan'): void ns3::LrWpanMac::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3LrWpanMacTrailer_methods(root_module, cls):
    """Register the Python bindings for ns3::LrWpanMacTrailer on *cls*.

    Auto-generated API-scan output (the ``cls.add_*``/``param`` call style
    matches PyBindGen's registration API — presumably generated; confirm
    before hand-editing). Each entry mirrors one declaration from
    lr-wpan-mac-trailer.h (module 'lr-wpan'); the type-name strings and the
    registration order must stay in sync with the scanned header.
    """
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): ns3::LrWpanMacTrailer::LrWpanMacTrailer(ns3::LrWpanMacTrailer const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanMacTrailer const &', 'arg0')])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): ns3::LrWpanMacTrailer::LrWpanMacTrailer() [constructor]
    cls.add_constructor([])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): bool ns3::LrWpanMacTrailer::CheckFcs(ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('CheckFcs',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacTrailer::Deserialize(ns3::Buffer::Iterator start) [member function]
    cls.add_method('Deserialize',
                   'uint32_t',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_virtual=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): void ns3::LrWpanMacTrailer::EnableFcs(bool enable) [member function]
    cls.add_method('EnableFcs',
                   'void',
                   [param('bool', 'enable')])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): uint16_t ns3::LrWpanMacTrailer::GetFcs() const [member function]
    cls.add_method('GetFcs',
                   'uint16_t',
                   [],
                   is_const=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): ns3::TypeId ns3::LrWpanMacTrailer::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): uint32_t ns3::LrWpanMacTrailer::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanMacTrailer::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): bool ns3::LrWpanMacTrailer::IsFcsEnabled() [member function]
    cls.add_method('IsFcsEnabled',
                   'bool',
                   [])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): void ns3::LrWpanMacTrailer::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): void ns3::LrWpanMacTrailer::Serialize(ns3::Buffer::Iterator start) const [member function]
    cls.add_method('Serialize',
                   'void',
                   [param('ns3::Buffer::Iterator', 'start')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): void ns3::LrWpanMacTrailer::SetFcs(ns3::Ptr<const ns3::Packet> p) [member function]
    cls.add_method('SetFcs',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'p')])
    ## lr-wpan-mac-trailer.h (module 'lr-wpan'): ns3::LrWpanMacTrailer::LR_WPAN_MAC_FCS_LENGTH [variable]
    cls.add_static_attribute('LR_WPAN_MAC_FCS_LENGTH', 'uint16_t const', is_const=True)
    return
def register_Ns3LrWpanPhy_methods(root_module, cls):
    """Register the Python bindings for ns3::LrWpanPhy on *cls*.

    Auto-generated API-scan output (PyBindGen-style registration calls —
    presumably generated; confirm before hand-editing). Each entry mirrors
    one declaration from lr-wpan-phy.h (module 'lr-wpan'): static PHY
    constants, SpectrumPhy-style mobility/channel/device plumbing, the
    PD/PLME request primitives, and the confirm/indication callback setters.
    Keep type-name strings and call order in sync with the scanned header.
    """
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhy::aMaxPhyPacketSize [variable]
    cls.add_static_attribute('aMaxPhyPacketSize', 'uint32_t const', is_const=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhy::aTurnaroundTime [variable]
    cls.add_static_attribute('aTurnaroundTime', 'uint32_t const', is_const=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanPhy::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::LrWpanPhy::LrWpanPhy() [constructor]
    cls.add_constructor([])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetMobility(ns3::Ptr<ns3::MobilityModel> m) [member function]
    cls.add_method('SetMobility',
                   'void',
                   [param('ns3::Ptr< ns3::MobilityModel >', 'm')],
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::MobilityModel> ns3::LrWpanPhy::GetMobility() [member function]
    cls.add_method('GetMobility',
                   'ns3::Ptr< ns3::MobilityModel >',
                   [],
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetChannel(ns3::Ptr<ns3::SpectrumChannel> c) [member function]
    cls.add_method('SetChannel',
                   'void',
                   [param('ns3::Ptr< ns3::SpectrumChannel >', 'c')],
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumChannel> ns3::LrWpanPhy::GetChannel() [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::SpectrumChannel >',
                   [])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetDevice(ns3::Ptr<ns3::NetDevice> d) [member function]
    cls.add_method('SetDevice',
                   'void',
                   [param('ns3::Ptr< ns3::NetDevice >', 'd')],
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::NetDevice> ns3::LrWpanPhy::GetDevice() const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetAntenna(ns3::Ptr<ns3::AntennaModel> a) [member function]
    cls.add_method('SetAntenna',
                   'void',
                   [param('ns3::Ptr< ns3::AntennaModel >', 'a')])
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::AntennaModel> ns3::LrWpanPhy::GetRxAntenna() [member function]
    cls.add_method('GetRxAntenna',
                   'ns3::Ptr< ns3::AntennaModel >',
                   [],
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumModel const> ns3::LrWpanPhy::GetRxSpectrumModel() const [member function]
    cls.add_method('GetRxSpectrumModel',
                   'ns3::Ptr< ns3::SpectrumModel const >',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetTxPowerSpectralDensity(ns3::Ptr<ns3::SpectrumValue> txPsd) [member function]
    cls.add_method('SetTxPowerSpectralDensity',
                   'void',
                   [param('ns3::Ptr< ns3::SpectrumValue >', 'txPsd')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetNoisePowerSpectralDensity(ns3::Ptr<ns3::SpectrumValue const> noisePsd) [member function]
    cls.add_method('SetNoisePowerSpectralDensity',
                   'void',
                   [param('ns3::Ptr< ns3::SpectrumValue const >', 'noisePsd')])
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::SpectrumValue const> ns3::LrWpanPhy::GetNoisePowerSpectralDensity() [member function]
    cls.add_method('GetNoisePowerSpectralDensity',
                   'ns3::Ptr< ns3::SpectrumValue const >',
                   [])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::StartRx(ns3::Ptr<ns3::SpectrumSignalParameters> params) [member function]
    cls.add_method('StartRx',
                   'void',
                   [param('ns3::Ptr< ns3::SpectrumSignalParameters >', 'params')],
                   is_virtual=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PdDataRequest(uint32_t const psduLength, ns3::Ptr<ns3::Packet> p) [member function]
    cls.add_method('PdDataRequest',
                   'void',
                   [param('uint32_t const', 'psduLength'), param('ns3::Ptr< ns3::Packet >', 'p')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PlmeCcaRequest() [member function]
    cls.add_method('PlmeCcaRequest',
                   'void',
                   [])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PlmeEdRequest() [member function]
    cls.add_method('PlmeEdRequest',
                   'void',
                   [])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PlmeGetAttributeRequest(ns3::LrWpanPibAttributeIdentifier id) [member function]
    cls.add_method('PlmeGetAttributeRequest',
                   'void',
                   [param('ns3::LrWpanPibAttributeIdentifier', 'id')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PlmeSetTRXStateRequest(ns3::LrWpanPhyEnumeration state) [member function]
    cls.add_method('PlmeSetTRXStateRequest',
                   'void',
                   [param('ns3::LrWpanPhyEnumeration', 'state')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::PlmeSetAttributeRequest(ns3::LrWpanPibAttributeIdentifier id, ns3::LrWpanPhyPibAttributes * attribute) [member function]
    cls.add_method('PlmeSetAttributeRequest',
                   'void',
                   [param('ns3::LrWpanPibAttributeIdentifier', 'id'), param('ns3::LrWpanPhyPibAttributes *', 'attribute')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPdDataIndicationCallback(ns3::PdDataIndicationCallback c) [member function]
    cls.add_method('SetPdDataIndicationCallback',
                   'void',
                   [param('ns3::PdDataIndicationCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPdDataConfirmCallback(ns3::PdDataConfirmCallback c) [member function]
    cls.add_method('SetPdDataConfirmCallback',
                   'void',
                   [param('ns3::PdDataConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPlmeCcaConfirmCallback(ns3::PlmeCcaConfirmCallback c) [member function]
    cls.add_method('SetPlmeCcaConfirmCallback',
                   'void',
                   [param('ns3::PlmeCcaConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPlmeEdConfirmCallback(ns3::PlmeEdConfirmCallback c) [member function]
    cls.add_method('SetPlmeEdConfirmCallback',
                   'void',
                   [param('ns3::PlmeEdConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPlmeGetAttributeConfirmCallback(ns3::PlmeGetAttributeConfirmCallback c) [member function]
    cls.add_method('SetPlmeGetAttributeConfirmCallback',
                   'void',
                   [param('ns3::PlmeGetAttributeConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPlmeSetTRXStateConfirmCallback(ns3::PlmeSetTRXStateConfirmCallback c) [member function]
    cls.add_method('SetPlmeSetTRXStateConfirmCallback',
                   'void',
                   [param('ns3::PlmeSetTRXStateConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetPlmeSetAttributeConfirmCallback(ns3::PlmeSetAttributeConfirmCallback c) [member function]
    cls.add_method('SetPlmeSetAttributeConfirmCallback',
                   'void',
                   [param('ns3::PlmeSetAttributeConfirmCallback', 'c')])
    ## lr-wpan-phy.h (module 'lr-wpan'): double ns3::LrWpanPhy::GetDataOrSymbolRate(bool isData) [member function]
    cls.add_method('GetDataOrSymbolRate',
                   'double',
                   [param('bool', 'isData')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::SetErrorModel(ns3::Ptr<ns3::LrWpanErrorModel> e) [member function]
    cls.add_method('SetErrorModel',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanErrorModel >', 'e')])
    ## lr-wpan-phy.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanErrorModel> ns3::LrWpanPhy::GetErrorModel() const [member function]
    cls.add_method('GetErrorModel',
                   'ns3::Ptr< ns3::LrWpanErrorModel >',
                   [],
                   is_const=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): uint64_t ns3::LrWpanPhy::GetPhySHRDuration() const [member function]
    cls.add_method('GetPhySHRDuration',
                   'uint64_t',
                   [],
                   is_const=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): double ns3::LrWpanPhy::GetPhySymbolsPerOctet() const [member function]
    cls.add_method('GetPhySymbolsPerOctet',
                   'double',
                   [],
                   is_const=True)
    ## lr-wpan-phy.h (module 'lr-wpan'): int64_t ns3::LrWpanPhy::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')])
    ## lr-wpan-phy.h (module 'lr-wpan'): void ns3::LrWpanPhy::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3LrWpanSpectrumSignalParameters_methods(root_module, cls):
    """Register ns3::LrWpanSpectrumSignalParameters bindings on *cls*.

    Entries mirror lr-wpan-spectrum-signal-parameters.h (module 'lr-wpan').
    """
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::LrWpanSpectrumSignalParameters const &', 'p')])
    # Virtual clone hook inherited from SpectrumSignalParameters.
    cls.add_method('Copy', 'ns3::Ptr< ns3::SpectrumSignalParameters >', [],
                   is_virtual=True)
    # Packet burst carried by the transmitted signal.
    cls.add_instance_attribute('packetBurst', 'ns3::Ptr< ns3::PacketBurst >',
                               is_const=False)
    return
def register_Ns3Mac16AddressChecker_methods(root_module, cls):
    """Register ns3::Mac16AddressChecker bindings (mac16-address.h, module 'network').

    Only the default and copy constructors are exposed.
    """
    for ctor_args in ([], [param('ns3::Mac16AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Mac16AddressValue_methods(root_module, cls):
    """Register ns3::Mac16AddressValue bindings (mac16-address.h, module 'network').

    Standard AttributeValue wrapper surface: three constructors, the virtual
    Copy/serialization round-trip, and typed Get/Set accessors.
    """
    # Constructors: default, copy, and from a Mac16Address.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Mac16AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Mac16Address const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Mac16Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Mac16Address const &', 'value')])
    return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
    """Register ns3::Mac48AddressChecker bindings (mac48-address.h, module 'network').

    Only the default and copy constructors are exposed.
    """
    for ctor_args in ([], [param('ns3::Mac48AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
    """Register ns3::Mac48AddressValue bindings (mac48-address.h, module 'network').

    Standard AttributeValue wrapper surface: three constructors, the virtual
    Copy/serialization round-trip, and typed Get/Set accessors.
    """
    # Constructors: default, copy, and from a Mac48Address.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Mac48Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Mac48Address const &', 'value')])
    return
def register_Ns3Mac64AddressChecker_methods(root_module, cls):
    """Register ns3::Mac64AddressChecker bindings (mac64-address.h, module 'network').

    Only the default and copy constructors are exposed.
    """
    for ctor_args in ([], [param('ns3::Mac64AddressChecker const &', 'arg0')]):
        cls.add_constructor(ctor_args)
    return
def register_Ns3Mac64AddressValue_methods(root_module, cls):
    """Register ns3::Mac64AddressValue bindings (mac64-address.h, module 'network').

    Standard AttributeValue wrapper surface: three constructors, the virtual
    Copy/serialization round-trip, and typed Get/Set accessors.
    """
    # Constructors: default, copy, and from a Mac64Address.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Mac64AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Mac64Address const &', 'value')])
    # AttributeValue virtual interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    cls.add_method('Get', 'ns3::Mac64Address', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Mac64Address const &', 'value')])
    return
def register_Ns3NetDevice_methods(root_module, cls):
    """Register the Python bindings for the abstract ns3::NetDevice on *cls*.

    Auto-generated API-scan output (PyBindGen-style registration calls —
    presumably generated; confirm before hand-editing). Each entry mirrors
    one declaration from net-device.h (module 'network'); most methods are
    registered pure-virtual, matching the abstract base-class interface.
    The long ns3::Callback<...> strings must be kept byte-exact — they are
    matched textually by the binding generator.
    """
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
    cls.add_constructor([])
    ## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
    ## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress',
                   'ns3::Address',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Address',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::Channel >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex',
                   'uint32_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu',
                   'uint16_t',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress',
                   'void',
                   [param('ns3::Address', 'address')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex',
                   'void',
                   [param('uint32_t const', 'index')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu',
                   'bool',
                   [param('uint16_t const', 'mtu')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_pure_virtual=True, is_virtual=True)
    ## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom',
                   'bool',
                   [],
                   is_pure_virtual=True, is_const=True, is_virtual=True)
    return
def register_Ns3NixVector_methods(root_module, cls):
    """Register ns3::NixVector bindings (nix-vector.h, module 'network')."""
    # operator<< support for printing.
    cls.add_output_stream_operator()
    # Default and copy constructors.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::NixVector const &', 'o')])
    # Bit-level neighbor-index manipulation.
    cls.add_method('AddNeighborIndex', 'void',
                   [param('uint32_t', 'newBits'), param('uint32_t', 'numberOfBits')])
    cls.add_method('BitCount', 'uint32_t',
                   [param('uint32_t', 'numberOfNeighbors')], is_const=True)
    cls.add_method('Copy', 'ns3::Ptr< ns3::NixVector >', [], is_const=True)
    # Raw-buffer (de)serialization.
    cls.add_method('Deserialize', 'uint32_t',
                   [param('uint32_t const *', 'buffer'), param('uint32_t', 'size')])
    cls.add_method('ExtractNeighborIndex', 'uint32_t',
                   [param('uint32_t', 'numberOfBits')])
    cls.add_method('GetRemainingBits', 'uint32_t', [])
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True)
    cls.add_method('Serialize', 'uint32_t',
                   [param('uint32_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    return
def register_Ns3Node_methods(root_module, cls):
    """Register the Python bindings for ns3::Node on *cls*.

    Auto-generated API-scan output (PyBindGen-style registration calls —
    presumably generated; confirm before hand-editing). Each entry mirrors
    one declaration from node.h (module 'network'): constructors,
    device/application accessors, protocol-handler registration, and the
    protected DoDispose/DoInitialize lifecycle hooks. The long
    ns3::Callback<...> strings must be kept byte-exact.
    """
    ## node.h (module 'network'): ns3::Node::Node(ns3::Node const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Node const &', 'arg0')])
    ## node.h (module 'network'): ns3::Node::Node() [constructor]
    cls.add_constructor([])
    ## node.h (module 'network'): ns3::Node::Node(uint32_t systemId) [constructor]
    cls.add_constructor([param('uint32_t', 'systemId')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddApplication(ns3::Ptr<ns3::Application> application) [member function]
    cls.add_method('AddApplication',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::Application >', 'application')])
    ## node.h (module 'network'): uint32_t ns3::Node::AddDevice(ns3::Ptr<ns3::NetDevice> device) [member function]
    cls.add_method('AddDevice',
                   'uint32_t',
                   [param('ns3::Ptr< ns3::NetDevice >', 'device')])
    ## node.h (module 'network'): static bool ns3::Node::ChecksumEnabled() [member function]
    cls.add_method('ChecksumEnabled',
                   'bool',
                   [],
                   is_static=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::Application> ns3::Node::GetApplication(uint32_t index) const [member function]
    cls.add_method('GetApplication',
                   'ns3::Ptr< ns3::Application >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Node::GetDevice(uint32_t index) const [member function]
    cls.add_method('GetDevice',
                   'ns3::Ptr< ns3::NetDevice >',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetId() const [member function]
    cls.add_method('GetId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): ns3::Time ns3::Node::GetLocalTime() const [member function]
    cls.add_method('GetLocalTime',
                   'ns3::Time',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNApplications() const [member function]
    cls.add_method('GetNApplications',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetNDevices() const [member function]
    cls.add_method('GetNDevices',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): uint32_t ns3::Node::GetSystemId() const [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_const=True)
    ## node.h (module 'network'): static ns3::TypeId ns3::Node::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## node.h (module 'network'): void ns3::Node::RegisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('RegisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::RegisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler, uint16_t protocolType, ns3::Ptr<ns3::NetDevice> device, bool promiscuous=false) [member function]
    cls.add_method('RegisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler'), param('uint16_t', 'protocolType'), param('ns3::Ptr< ns3::NetDevice >', 'device'), param('bool', 'promiscuous', default_value='false')])
    ## node.h (module 'network'): void ns3::Node::UnregisterDeviceAdditionListener(ns3::Callback<void,ns3::Ptr<ns3::NetDevice>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> listener) [member function]
    cls.add_method('UnregisterDeviceAdditionListener',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'listener')])
    ## node.h (module 'network'): void ns3::Node::UnregisterProtocolHandler(ns3::Callback<void, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> handler) [member function]
    cls.add_method('UnregisterProtocolHandler',
                   'void',
                   [param('ns3::Callback< void, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'handler')])
    ## node.h (module 'network'): void ns3::Node::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    ## node.h (module 'network'): void ns3::Node::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='protected', is_virtual=True)
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register ns3::ObjectFactoryChecker (object-factory.h, module 'core').

    Only the default and copy constructors are exposed, in that order.
    """
    for signature in ([], [param('ns3::ObjectFactoryChecker const &', 'arg0')]):
        cls.add_constructor(signature)
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register ns3::ObjectFactoryValue (object-factory.h, module 'core').

    Exposes the constructors plus the ns3::AttributeValue interface
    (Copy/SerializeToString/DeserializeFromString) and the typed Get/Set pair.
    """
    # Constructors: default, copy, and from an ns3::ObjectFactory value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    # ns3::Ptr<ns3::AttributeValue> Copy() const [virtual]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string, Ptr<AttributeChecker const>) [virtual]
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::ObjectFactory Get() const
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const>) const [virtual]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::ObjectFactory const &)
    cls.add_method('Set', 'void',
                   [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3OutputStreamWrapper_methods(root_module, cls):
    """Register ns3::OutputStreamWrapper (output-stream-wrapper.h, module
    'network'): copy / filename+mode / raw-ostream constructors and
    GetStream()."""
    cls.add_constructor([param('ns3::OutputStreamWrapper const &', 'arg0')])
    cls.add_constructor([param('std::string', 'filename'),
                         param('std::_Ios_Openmode', 'filemode')])
    cls.add_constructor([param('std::ostream *', 'os')])
    # std::ostream * GetStream()
    cls.add_method('GetStream', 'std::ostream *', [])
    return
def register_Ns3Packet_methods(root_module, cls):
    """Register the Python binding for ns3::Packet (packet.h, module 'network').

    Auto-generated pybindgen registrations: constructors, header/trailer and
    byte/packet-tag manipulation, fragment/copy helpers, (de)serialization and
    printing.  Each ``##`` comment below is the upstream C++ declaration the
    following ``add_method``/``add_constructor`` call wraps.
    """
    cls.add_output_stream_operator()
    ## packet.h (module 'network'): ns3::Packet::Packet() [constructor]
    cls.add_constructor([])
    ## packet.h (module 'network'): ns3::Packet::Packet(ns3::Packet const & o) [copy constructor]
    cls.add_constructor([param('ns3::Packet const &', 'o')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint32_t size) [constructor]
    cls.add_constructor([param('uint32_t', 'size')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size, bool magic) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size'), param('bool', 'magic')])
    ## packet.h (module 'network'): ns3::Packet::Packet(uint8_t const * buffer, uint32_t size) [constructor]
    cls.add_constructor([param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddAtEnd(ns3::Ptr<const ns3::Packet> packet) [member function]
    cls.add_method('AddAtEnd',
                   'void',
                   [param('ns3::Ptr< ns3::Packet const >', 'packet')])
    ## packet.h (module 'network'): void ns3::Packet::AddByteTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddByteTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddHeader(ns3::Header const & header) [member function]
    cls.add_method('AddHeader',
                   'void',
                   [param('ns3::Header const &', 'header')])
    ## packet.h (module 'network'): void ns3::Packet::AddPacketTag(ns3::Tag const & tag) const [member function]
    cls.add_method('AddPacketTag',
                   'void',
                   [param('ns3::Tag const &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::AddPaddingAtEnd(uint32_t size) [member function]
    cls.add_method('AddPaddingAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::AddTrailer(ns3::Trailer const & trailer) [member function]
    cls.add_method('AddTrailer',
                   'void',
                   [param('ns3::Trailer const &', 'trailer')])
    ## packet.h (module 'network'): ns3::PacketMetadata::ItemIterator ns3::Packet::BeginItem() const [member function]
    cls.add_method('BeginItem',
                   'ns3::PacketMetadata::ItemIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::Packet >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::CopyData(uint8_t * buffer, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::CopyData(std::ostream * os, uint32_t size) const [member function]
    cls.add_method('CopyData',
                   'void',
                   [param('std::ostream *', 'os'), param('uint32_t', 'size')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::Packet> ns3::Packet::CreateFragment(uint32_t start, uint32_t length) const [member function]
    cls.add_method('CreateFragment',
                   'ns3::Ptr< ns3::Packet >',
                   [param('uint32_t', 'start'), param('uint32_t', 'length')],
                   is_const=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnableChecking() [member function]
    cls.add_method('EnableChecking',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): static void ns3::Packet::EnablePrinting() [member function]
    cls.add_method('EnablePrinting',
                   'void',
                   [],
                   is_static=True)
    ## packet.h (module 'network'): bool ns3::Packet::FindFirstMatchingByteTag(ns3::Tag & tag) const [member function]
    cls.add_method('FindFirstMatchingByteTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): ns3::ByteTagIterator ns3::Packet::GetByteTagIterator() const [member function]
    cls.add_method('GetByteTagIterator',
                   'ns3::ByteTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::Ptr<ns3::NixVector> ns3::Packet::GetNixVector() const [member function]
    cls.add_method('GetNixVector',
                   'ns3::Ptr< ns3::NixVector >',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): ns3::PacketTagIterator ns3::Packet::GetPacketTagIterator() const [member function]
    cls.add_method('GetPacketTagIterator',
                   'ns3::PacketTagIterator',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSerializedSize() const [member function]
    cls.add_method('GetSerializedSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::GetSize() const [member function]
    cls.add_method('GetSize',
                   'uint32_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint64_t ns3::Packet::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint64_t',
                   [],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekHeader(ns3::Header & header) const [member function]
    cls.add_method('PeekHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')],
                   is_const=True)
    ## packet.h (module 'network'): bool ns3::Packet::PeekPacketTag(ns3::Tag & tag) const [member function]
    cls.add_method('PeekPacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')],
                   is_const=True)
    ## packet.h (module 'network'): uint32_t ns3::Packet::PeekTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('PeekTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): void ns3::Packet::Print(std::ostream & os) const [member function]
    cls.add_method('Print',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintByteTags(std::ostream & os) const [member function]
    cls.add_method('PrintByteTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::PrintPacketTags(std::ostream & os) const [member function]
    cls.add_method('PrintPacketTags',
                   'void',
                   [param('std::ostream &', 'os')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllByteTags() [member function]
    cls.add_method('RemoveAllByteTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAllPacketTags() [member function]
    cls.add_method('RemoveAllPacketTags',
                   'void',
                   [])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtEnd(uint32_t size) [member function]
    cls.add_method('RemoveAtEnd',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): void ns3::Packet::RemoveAtStart(uint32_t size) [member function]
    cls.add_method('RemoveAtStart',
                   'void',
                   [param('uint32_t', 'size')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveHeader(ns3::Header & header) [member function]
    cls.add_method('RemoveHeader',
                   'uint32_t',
                   [param('ns3::Header &', 'header')])
    ## packet.h (module 'network'): bool ns3::Packet::RemovePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('RemovePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::RemoveTrailer(ns3::Trailer & trailer) [member function]
    cls.add_method('RemoveTrailer',
                   'uint32_t',
                   [param('ns3::Trailer &', 'trailer')])
    ## packet.h (module 'network'): bool ns3::Packet::ReplacePacketTag(ns3::Tag & tag) [member function]
    cls.add_method('ReplacePacketTag',
                   'bool',
                   [param('ns3::Tag &', 'tag')])
    ## packet.h (module 'network'): uint32_t ns3::Packet::Serialize(uint8_t * buffer, uint32_t maxSize) const [member function]
    cls.add_method('Serialize',
                   'uint32_t',
                   [param('uint8_t *', 'buffer'), param('uint32_t', 'maxSize')],
                   is_const=True)
    ## packet.h (module 'network'): void ns3::Packet::SetNixVector(ns3::Ptr<ns3::NixVector> nixVector) [member function]
    cls.add_method('SetNixVector',
                   'void',
                   [param('ns3::Ptr< ns3::NixVector >', 'nixVector')])
    ## packet.h (module 'network'): std::string ns3::Packet::ToString() const [member function]
    cls.add_method('ToString',
                   'std::string',
                   [],
                   is_const=True)
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register ns3::TimeValue (nstime.h, module 'core').

    Exposes the constructors plus the ns3::AttributeValue interface and the
    typed Get/Set accessors for ns3::Time.
    """
    # Constructors: default, copy, and from an ns3::Time value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Time const &', 'value')])
    # ns3::Ptr<ns3::AttributeValue> Copy() const [virtual]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string, Ptr<AttributeChecker const>) [virtual]
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::Time Get() const
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const>) const [virtual]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::Time const &)
    cls.add_method('Set', 'void',
                   [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register ns3::TypeIdChecker (type-id.h, module 'core').

    Only the default and copy constructors are exposed, in that order.
    """
    for signature in ([], [param('ns3::TypeIdChecker const &', 'arg0')]):
        cls.add_constructor(signature)
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register ns3::TypeIdValue (type-id.h, module 'core').

    Exposes the constructors plus the ns3::AttributeValue interface and the
    typed Get/Set accessors for ns3::TypeId.
    """
    # Constructors: default, copy, and from an ns3::TypeId value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    # ns3::Ptr<ns3::AttributeValue> Copy() const [virtual]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string, Ptr<AttributeChecker const>) [virtual]
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::TypeId Get() const
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const>) const [virtual]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::TypeId const &)
    cls.add_method('Set', 'void',
                   [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3UintegerValue_methods(root_module, cls):
    """Register ns3::UintegerValue (uinteger.h, module 'core').

    Exposes the constructors plus the ns3::AttributeValue interface and the
    typed Get/Set accessors for the wrapped uint64_t.
    """
    # Constructors: default, copy, and from a uint64_t value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
    cls.add_constructor([param('uint64_t const &', 'value')])
    # ns3::Ptr<ns3::AttributeValue> Copy() const [virtual]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string, Ptr<AttributeChecker const>) [virtual]
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # uint64_t Get() const
    cls.add_method('Get', 'uint64_t', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const>) const [virtual]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(uint64_t const &)
    cls.add_method('Set', 'void',
                   [param('uint64_t const &', 'value')])
    return
def register_Ns3AddressChecker_methods(root_module, cls):
    """Register ns3::AddressChecker (address.h, module 'network').

    Only the default and copy constructors are exposed, in that order.
    """
    for signature in ([], [param('ns3::AddressChecker const &', 'arg0')]):
        cls.add_constructor(signature)
    return
def register_Ns3AddressValue_methods(root_module, cls):
    """Register ns3::AddressValue (address.h, module 'network').

    Exposes the constructors plus the ns3::AttributeValue interface and the
    typed Get/Set accessors for ns3::Address.
    """
    # Constructors: default, copy, and from an ns3::Address value.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
    cls.add_constructor([param('ns3::Address const &', 'value')])
    # ns3::Ptr<ns3::AttributeValue> Copy() const [virtual]
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # bool DeserializeFromString(std::string, Ptr<AttributeChecker const>) [virtual]
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'),
                    param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    # ns3::Address Get() const
    cls.add_method('Get', 'ns3::Address', [], is_const=True)
    # std::string SerializeToString(Ptr<AttributeChecker const>) const [virtual]
    cls.add_method('SerializeToString', 'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    # void Set(ns3::Address const &)
    cls.add_method('Set', 'void',
                   [param('ns3::Address const &', 'value')])
    return
def register_Ns3LrWpanNetDevice_methods(root_module, cls):
    """Register the Python binding for ns3::LrWpanNetDevice
    (lr-wpan-net-device.h, module 'lr-wpan').

    Auto-generated pybindgen registrations: constructors, the
    ns3::NetDevice virtual interface, LR-WPAN specific accessors
    (Mac/Phy/CsmaCa/Channel) and the private DoDispose/DoInitialize
    overrides.  Each ``##`` comment below is the upstream C++ declaration
    the following call wraps.
    """
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::LrWpanNetDevice::LrWpanNetDevice(ns3::LrWpanNetDevice const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::LrWpanNetDevice const &', 'arg0')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::LrWpanNetDevice::LrWpanNetDevice() [constructor]
    cls.add_constructor([])
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
    cls.add_method('AddLinkChangeCallback',
                   'void',
                   [param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): int64_t ns3::LrWpanNetDevice::AssignStreams(int64_t stream) [member function]
    cls.add_method('AssignStreams',
                   'int64_t',
                   [param('int64_t', 'stream')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Address ns3::LrWpanNetDevice::GetAddress() const [member function]
    cls.add_method('GetAddress',
                   'ns3::Address',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Address ns3::LrWpanNetDevice::GetBroadcast() const [member function]
    cls.add_method('GetBroadcast',
                   'ns3::Address',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Ptr<ns3::Channel> ns3::LrWpanNetDevice::GetChannel() const [member function]
    cls.add_method('GetChannel',
                   'ns3::Ptr< ns3::Channel >',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanCsmaCa> ns3::LrWpanNetDevice::GetCsmaCa() const [member function]
    cls.add_method('GetCsmaCa',
                   'ns3::Ptr< ns3::LrWpanCsmaCa >',
                   [],
                   is_const=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): uint32_t ns3::LrWpanNetDevice::GetIfIndex() const [member function]
    cls.add_method('GetIfIndex',
                   'uint32_t',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanMac> ns3::LrWpanNetDevice::GetMac() const [member function]
    cls.add_method('GetMac',
                   'ns3::Ptr< ns3::LrWpanMac >',
                   [],
                   is_const=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): uint16_t ns3::LrWpanNetDevice::GetMtu() const [member function]
    cls.add_method('GetMtu',
                   'uint16_t',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Address ns3::LrWpanNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv4Address', 'multicastGroup')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Address ns3::LrWpanNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
    cls.add_method('GetMulticast',
                   'ns3::Address',
                   [param('ns3::Ipv6Address', 'addr')],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Ptr<ns3::Node> ns3::LrWpanNetDevice::GetNode() const [member function]
    cls.add_method('GetNode',
                   'ns3::Ptr< ns3::Node >',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): ns3::Ptr<ns3::LrWpanPhy> ns3::LrWpanNetDevice::GetPhy() const [member function]
    cls.add_method('GetPhy',
                   'ns3::Ptr< ns3::LrWpanPhy >',
                   [],
                   is_const=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): static ns3::TypeId ns3::LrWpanNetDevice::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::IsBridge() const [member function]
    cls.add_method('IsBridge',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::IsBroadcast() const [member function]
    cls.add_method('IsBroadcast',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::IsLinkUp() const [member function]
    cls.add_method('IsLinkUp',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::IsMulticast() const [member function]
    cls.add_method('IsMulticast',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::IsPointToPoint() const [member function]
    cls.add_method('IsPointToPoint',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::McpsDataIndication(ns3::McpsDataIndicationParams params, ns3::Ptr<ns3::Packet> pkt) [member function]
    cls.add_method('McpsDataIndication',
                   'void',
                   [param('ns3::McpsDataIndicationParams', 'params'), param('ns3::Ptr< ns3::Packet >', 'pkt')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::NeedsArp() const [member function]
    cls.add_method('NeedsArp',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('Send',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
    cls.add_method('SendFrom',
                   'bool',
                   [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetAddress(ns3::Address address) [member function]
    cls.add_method('SetAddress',
                   'void',
                   [param('ns3::Address', 'address')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetChannel(ns3::Ptr<ns3::SpectrumChannel> channel) [member function]
    cls.add_method('SetChannel',
                   'void',
                   [param('ns3::Ptr< ns3::SpectrumChannel >', 'channel')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetCsmaCa(ns3::Ptr<ns3::LrWpanCsmaCa> csmaca) [member function]
    cls.add_method('SetCsmaCa',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanCsmaCa >', 'csmaca')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetIfIndex(uint32_t const index) [member function]
    cls.add_method('SetIfIndex',
                   'void',
                   [param('uint32_t const', 'index')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetMac(ns3::Ptr<ns3::LrWpanMac> mac) [member function]
    cls.add_method('SetMac',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanMac >', 'mac')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::SetMtu(uint16_t const mtu) [member function]
    cls.add_method('SetMtu',
                   'bool',
                   [param('uint16_t const', 'mtu')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
    cls.add_method('SetNode',
                   'void',
                   [param('ns3::Ptr< ns3::Node >', 'node')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetPhy(ns3::Ptr<ns3::LrWpanPhy> phy) [member function]
    cls.add_method('SetPhy',
                   'void',
                   [param('ns3::Ptr< ns3::LrWpanPhy >', 'phy')])
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool,ns3::Ptr<ns3::NetDevice>,ns3::Ptr<const ns3::Packet>,short unsigned int,const ns3::Address&,const ns3::Address&,ns3::NetDevice::PacketType,ns3::empty,ns3::empty,ns3::empty> cb) [member function]
    cls.add_method('SetPromiscReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, short unsigned int, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
    cls.add_method('SetReceiveCallback',
                   'void',
                   [param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
                   is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): bool ns3::LrWpanNetDevice::SupportsSendFrom() const [member function]
    cls.add_method('SupportsSendFrom',
                   'bool',
                   [],
                   is_const=True, is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    ## lr-wpan-net-device.h (module 'lr-wpan'): void ns3::LrWpanNetDevice::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   visibility='private', is_virtual=True)
    return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Register ns3::Hash::Implementation (hash-function.h, module 'core'),
    the abstract base class for hash-function implementations."""
    # Copy and default constructors (declared in this order upstream).
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    cls.add_constructor([])
    # uint32_t GetHash32(char const *, size_t const) [pure virtual]
    cls.add_method('GetHash32', 'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_pure_virtual=True, is_virtual=True)
    # uint64_t GetHash64(char const *, size_t const) [virtual]
    cls.add_method('GetHash64', 'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    # void clear() [pure virtual]
    cls.add_method('clear', 'void', [],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register ns3::Hash::Function::Fnv1a (hash-fnv.h, module 'core')."""
    # Copy and default constructors (declared in this order upstream).
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    cls.add_constructor([])
    # uint32_t GetHash32(char const *, size_t const) [virtual]
    cls.add_method('GetHash32', 'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    # uint64_t GetHash64(char const *, size_t const) [virtual]
    cls.add_method('GetHash64', 'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    # void clear() [virtual]
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register ns3::Hash::Function::Hash32 (hash-function.h, module 'core'),
    the adapter wrapping a plain 32-bit hash function pointer."""
    # Copy constructor, then construction from a Hash32Function_ptr.
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    # uint32_t GetHash32(char const *, size_t const) [virtual]
    cls.add_method('GetHash32', 'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    # void clear() [virtual]
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register ns3::Hash::Function::Hash64 (hash-function.h, module 'core'),
    the adapter wrapping a plain 64-bit hash function pointer."""
    # Copy constructor, then construction from a Hash64Function_ptr.
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    # uint32_t GetHash32(char const *, size_t const) [virtual]
    cls.add_method('GetHash32', 'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    # uint64_t GetHash64(char const *, size_t const) [virtual]
    cls.add_method('GetHash64', 'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    # void clear() [virtual]
    cls.add_method('clear', 'void', [], is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register Python bindings for ns3::Hash::Function::Murmur3.

    Appears to be pybindgen-generated code; Murmur3 provides both 32- and
    64-bit hashes plus a default constructor.
    """
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
    cls.add_constructor([])
    ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_functions(root_module):
    """Register free functions for each ns-3 submodule (generated code).

    Most submodules have no free functions to bind, hence the empty bodies
    below — they are kept so the generator's call structure stays uniform.
    """
    module = root_module
    register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
    register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module)
    register_functions_ns3_internal(module.get_submodule('internal'), root_module)
    return

def register_functions_ns3_FatalImpl(module, root_module):
    # No free functions in ns3::FatalImpl.
    return

def register_functions_ns3_Hash(module, root_module):
    # Recurse into the nested ns3::Hash::Function namespace.
    register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
    return

def register_functions_ns3_Hash_Function(module, root_module):
    # No free functions in ns3::Hash::Function.
    return

def register_functions_ns3_TracedValueCallback(module, root_module):
    # No free functions in ns3::TracedValueCallback.
    return

def register_functions_ns3_internal(module, root_module):
    # No free functions in ns3::internal.
    return
def main():
    """Emit the generated C++ binding code for this module to stdout.

    Builds the module description (types, methods, free functions) and then
    writes the generated source through a FileCodeSink.
    """
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)

if __name__ == '__main__':
    main()
|
gpl-2.0
| 7,679,634,221,064,839,000
| 64.780503
| 448
| 0.616444
| false
| 3.635798
| false
| false
| false
|
panholt/sparkpy
|
sparkpy/models/room.py
|
1
|
4745
|
from .base import SparkBase, SparkProperty
from .time import SparkTime
from .message import SparkMessage
from .membership import SparkMembership
from .container import SparkContainer
class SparkRoom(SparkBase):
    """A Cisco Spark room.

    Exposes the room's members and messages as lazy `SparkContainer`s and
    provides helpers to send messages and manage membership.
    """

    # | Start of class attributes |-------------------------------------------|
    API_BASE = 'https://api.ciscospark.com/v1/rooms/'
    PROPERTIES = {'id': SparkProperty('id'),
                  'title': SparkProperty('title', mutable=True),
                  'type': SparkProperty('type'),
                  # NOTE(review): the property is registered as 'islocked'
                  # (lower case) while the API key is 'isLocked' — confirm
                  # whether SparkProperty names are case-sensitive.
                  'isLocked': SparkProperty('islocked',
                                            optional=True),
                  'lastActivity': SparkProperty('lastActivity',
                                                optional=True),
                  'created': SparkProperty('created'),
                  'creatorId': SparkProperty('creatorId'),
                  'sipAddress': SparkProperty('sipAddress', optional=True),
                  'teamId': SparkProperty('teamId', optional=True)}

    # | Start of instance attributes |----------------------------------------|
    def __init__(self, *args, **kwargs):
        super().__init__(*args, path='rooms', **kwargs)

    def update(self, key, value):
        """Push a mutable property change to the Spark API.

        Only `title` is currently supported; `isLocked` is explicitly
        unimplemented.

        :param key: property name to change
        :param value: new value
        :raises NotImplementedError: when attempting to change `isLocked`
        """
        if key == 'title' and len(value):
            self.parent.session.put(self.url, json={key: value})
        elif key == 'isLocked':
            # Bug fix: the original `raise NotImplemented(...)` called the
            # NotImplemented singleton (a TypeError at runtime) instead of
            # raising the intended exception type.
            raise NotImplementedError('isLocked is not implemented')
        return

    @property
    def members(self):
        ''' Members of the Cisco Spark Room

            :getter: a generator like object of members of the room
            :type: `SparkContainer` of `SparkMembership` items
        '''
        return SparkContainer(SparkMembership,
                              params={'roomId': self.id},
                              parent=self)

    @property
    def messages(self):
        ''' Messages in the Cisco Spark Room

            :getter: a generator like object of messages of the room
            :type: `SparkContainer` of `SparkMessage` items
        '''
        return SparkContainer(SparkMessage,
                              params=self.message_params,
                              parent=self)

    @property
    def link(self):
        ''' Direct link to this room in the Spark web client. '''
        return f'https://web.ciscospark.com/rooms/{self.uuid}/chat'

    @property
    def message_params(self):
        ''' Returns URL parameters for /messages/

            Sets the `roomId` filter and if the session owner is a bot,
            the `mentionedPeople` filter is set to `me` (bots can only list
            messages in group rooms where they are mentioned).

            :getter: url parameters
            :type: dict
        '''
        data = {'roomId': self.id}
        if self.parent.is_bot and self.type == 'group':
            data['mentionedPeople'] = 'me'
        return data

    def send_message(self, text, file=None):
        ''' Send a message to the room

            :param text: Markdown formatted text to send in message
            :type text: str
            :param file: optional file to attach
            :return: None
        '''
        self.parent.send_message(text, room_id=self.id, file=file)
        return

    def add_member(self, *args, email='', moderator=False):
        ''' Add a person to the room

            :param args: optional positional personId
            :param email: email address of person to add
            :type email: str
            :param moderator: Default: False, Make person a moderator of room
            :type moderator: bool
            :return: None
        '''
        data = {'roomId': self.id}
        if args:
            # TODO Type checking
            data['personId'] = args[0]
        if '@' in email:
            data['personEmail'] = email
        if moderator:
            data['isModerator'] = moderator
        self.parent.session.post(SparkMembership.API_BASE, json=data)
        return

    def remove_member(self, *args, email=''):
        ''' Remove a person from the room

            :param args: optional positional personId
            :param email: email address of person to remove
            :type email: str
            :return: None
        '''
        if args:
            for member in self.members.filtered(lambda
                                                x: x.personId == args[0]):
                member.delete()
        elif '@' in email:
            for member in self.members.filtered(lambda
                                                x: x.personEmail == email):
                member.delete()
        return

    def remove_all_members(self):
        ''' Remove all people from the room leaving this account

            :return: None
        '''
        # NOTE(review): this predicate compares the membership object itself
        # to a person id, which looks like it should be
        # `x.personId != self.parent.me.id` (cf. remove_member). Left
        # unchanged pending confirmation of SparkContainer.filtered semantics.
        for member in self.members.filtered(lambda x: x != self.parent.me.id):
            member.delete()
        return

    def __repr__(self):
        return f"SparkRoom('{self.id}')"
|
mit
| 6,291,617,384,162,153,000
| 33.136691
| 79
| 0.525395
| false
| 4.629268
| false
| false
| false
|
prechelt/unread-decorator
|
setup.py
|
1
|
3307
|
# based on https://github.com/pypa/sampleproject/blob/master/setup.py
# see http://packaging.python.org/en/latest/tutorial.html#creating-your-own-project
from setuptools import setup, find_packages
from setuptools.command.install import install as stdinstall
import codecs
import os
import re
import sys
def find_version(*file_paths):
    """Extract the ``__version__ = '...'`` string from a source file.

    The path parts are joined relative to this setup script's directory.

    :raises RuntimeError: when no version assignment is found
    """
    base_dir = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(base_dir, *file_paths), 'r', 'latin1') as fh:
        contents = fh.read()
    # The version line must have the form:  __version__ = 'ver'
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
def get_file_contents(filename):
    """Read *filename* as UTF-8 text and return its full contents."""
    with codecs.open(filename, encoding='utf-8') as fh:
        return fh.read()
# Distribution name; also reused to build the project URL below.
package_name = "unread-decorator"

setup(
    # basic information:
    name=package_name,
    version=find_version('unread_decorator.py'),
    description="unread() for streams, unnext() for iterators",
    long_description=get_file_contents("README.rst"),
    # The project URL:
    url='http://github.com/prechelt/' + package_name,
    # Author details:
    author='Lutz Prechelt',
    author_email='prechelt@inf.fu-berlin.de',
    # Classification:
    license='BSD License',
    classifiers=[
        'License :: OSI Approved :: BSD License',
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='undo, I/O, iterator',
    # Single-module distribution; no package directory needed.
    py_modules=['unread_decorator'],
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages.
    packages=find_packages(exclude=["contrib", "docs", "tests*"]),
    # List run-time dependencies here. These will be installed by pip when your
    # project is installed.
    install_requires = [],
    # If there are data files included in your packages that need to be
    # installed, specify them here. If using Python 2.6 or less, then these
    # have to be included in MANIFEST.in as well.
    package_data={
        # 'mypackage': ['package_data.dat'],
    },
    # Although 'package_data' is the preferred approach, in some case you may
    # need to place data files outside of your packages.
    # see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
    # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    ###data_files=[('my_data', ['data/data_file'])],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    ### entry_points={
    #    'console_scripts': [
    #        'sample=sample:main',
    #    ],
    # },
)
|
bsd-2-clause
| 5,754,088,370,807,579,000
| 32.755102
| 91
| 0.643786
| false
| 3.890588
| false
| false
| false
|
askalbania/piernik
|
problems/mcrwind/piernik_problem.py
|
1
|
1992
|
#!/usr/bin/python
import sys
import numpy as np
import matplotlib
# Select a non-interactive backend before pylab pulls in a default one.
matplotlib.use('cairo')
from yt.mods import load as yt_load
from pylab import *

# Absolute differences below this are treated as equal; also guards the
# division in the relative-difference plot.
THRESHOLD = 1e-9
# Field name looked up in both datasets.
FIELD = "cr1"


def _myplot(diff, fname, ext, clbl):
    """Render *diff* as a symmetric red/blue image and save it to *fname*.

    The color scale is centered on zero (vmin=-v, vmax=+v) so that sign is
    visible.  *ext* gives the plot extent in pc; *clbl* labels the colorbar.
    Uses pylab's implicit current-figure state.
    """
    v = abs(diff).max()
    figure(1, (6, 8))
    imshow(diff, vmin=-v, vmax=v, extent=ext, cmap='RdBu')
    bar = colorbar()
    bar.ax.set_xlabel(clbl)
    draw()
    xlabel('y [pc]')
    ylabel('z [pc]')
    savefig(fname)
    clf()
def plot_diff(pf1, pf2, data1, data2, field):
    """Plot the absolute and relative difference of *field* between two
    yt slices.

    Writes 'diff_bare.png' (raw new-old difference) and 'diff.png'
    (difference relative to the old values, THRESHOLD-stabilized).
    Assumes both datasets share the same domain — TODO confirm.
    """
    wd = pf1.domain_width
    n_d = pf1.domain_dimensions
    # Plot extent in the (y, z) plane, converted to parsecs.
    ext = np.array([pf1.domain_left_edge[1], pf1.domain_right_edge[1],
                    pf1.domain_left_edge[2], pf1.domain_right_edge[2]])
    ext *= pf1['pc']
    # Fixed-resolution buffers at 10x the grid resolution.
    img1 = data1.to_frb(wd[1], (n_d[2] * 10, n_d[1] * 10),
                        center=np.array([0, 0, 0]), height=wd[2])
    img2 = data2.to_frb(wd[1], (n_d[2] * 10, n_d[1] * 10),
                        center=np.array([0, 0, 0]), height=wd[2])
    diff = (img2[field] - img1[field])
    clbl = \
        r"$\rm{%s}^{\rm{new}} - \rm{%s}^{\rm{old}}$" % (field, field)
    _myplot(diff, 'diff_bare.png', ext, clbl)
    clbl = \
        r"$\frac{\rm{%s}^{\rm{new}} - \rm{%s}^{\rm{old}}}{\rm{%s}^{\rm{old}}}$" % (field, field, field)
    # THRESHOLD avoids division by zero where the old field vanishes.
    _myplot(diff / (img1[field] + THRESHOLD), 'diff.png', ext, clbl)
# Script entry: compare two yt-loadable datasets given on the command line
# and emit difference plots for the first field that differs.
if len(sys.argv) != 3:
    print("Wrong number of arguments!")
    sys.exit(-1)

PF1 = yt_load(sys.argv[1])
PF2 = yt_load(sys.argv[2])

# The data is 2D: find the degenerate axis (one cell thick) and slice there.
axis = np.where(PF1.h.grids[0].ActiveDimensions == 1)[0][0]
DATA1 = PF1.h.slice(axis, 0.0, fields=[FIELD])
DATA2 = PF2.h.slice(axis, 0.0, fields=[FIELD])

if not PF1.h.field_list == PF2.h.field_list:
    print("Fields in files differ!")
    sys.exit(-1)

# Stop at the first field whose maximum deviation exceeds THRESHOLD.
for field in PF1.h.field_list:
    if abs(DATA1[field] - DATA2[field]).max() >= THRESHOLD:
        print("Field %s differs" % field)
        plot_diff(PF1, PF2, DATA1, DATA2, field)
        sys.exit(-1)

# No differences: still write (empty) plot files so callers find them.
figure(1, (8,6))
draw()
savefig('diff.png')
savefig('diff_bare.png')
|
gpl-3.0
| -1,219,925,660,328,792,000
| 28.294118
| 103
| 0.566767
| false
| 2.50566
| false
| false
| false
|
ccoakley/dbcbet
|
dbcbet/test/dbcbet_test.py
|
1
|
7562
|
"""Test dbcbet"""
from dbcbet.dbcbet import pre, post, inv, throws, dbc, bet, finitize, finitize_method, ContractViolation, ThrowsViolation
from dbcbet.helpers import state, argument_types

#
# These methods are the various preconditions, postconditions, and invariants used by tests
#
# Signature conventions (from their use with @pre/@post/@inv below):
#   precondition(self, *args)            -> bool
#   postcondition(self, old, ret, *args) -> bool
#   invariant(self)                      -> bool

# a precondition
def both_numbers_positive(self, arg1, arg2):
    return arg1 > 0 and arg2 > 0

# a (necessary) precondition
def first_greater_than_second(self, arg1, arg2):
    return arg1 > arg2

# a postcondition: ret is the value returned by the wrapped method
def returned_the_sum(self, old, ret, arg1, arg2):
    return ret == arg1+arg2

# another postcondition: checks the side effect on self.x
def set_x(self, old, ret, arg1, arg2):
    return self.x == arg1-arg2

# an invariant
def x_non_negative(self):
    return self.x >= 0

# a finitization: enumerates the state space used by bounded-exhaustive testing
def finitize_example_class():
    return {'x':[-1,0,1,2]}
# Pete: this seems like a typical case. Maybe the finitization should just be the returned hash, and not a function.
#
# showing off the syntax
#

# applying invariant to class, precondition and postconditions to the method
@inv(x_non_negative)
@finitize(finitize_example_class)
class ExampleClass:
    def __init__(self):
        self.x = 0

    # Bounded-exhaustive inputs for a1 and a2, plus stacked contracts.
    @finitize_method([-1,0,1,2,3],range(-1,3))
    @pre(both_numbers_positive)
    @pre(first_greater_than_second)
    @post(set_x)
    @post(returned_the_sum)
    def do_something(self, a1, a2):
        self.x = a1-a2
        return a1+a2

# Tests
def test_bet():
    # Run bounded-exhaustive testing over ExampleClass's finitized state space.
    bet(ExampleClass).run()
#
# A more complicated test with inheritance
#
# Each predicate vetoes exactly one distinctive value so the tests below can
# tell which contract (and which class in the hierarchy) fired.

def base_class_inv(self):
    if hasattr(self, "x"):
        return self.x != 1
    else:
        return True

def sub_class_inv(self):
    if hasattr(self, "x"):
        return self.x != 2
    else:
        return True

def base_class_method_pre(self, a):
    return a != 3

def sub_class_method_pre(self, a):
    return a != 4

def base_class_method_post(self, old, ret, a):
    return a != 5

def sub_class_method_post(self, old, ret, a):
    return a != 6

def sub_class_method_pre2(self, a):
    return a != 7

def sub_class_method_post2(self, old, ret, a):
    return a != 8

@inv(base_class_inv)
class TestBaseClass(object):
    @pre(base_class_method_pre)
    @post(base_class_method_post)
    def a_method(self, a):
        self.x = a

@inv(sub_class_inv)
class TestSubClass(TestBaseClass):
    # Overrides a_method: note the body stores a+1 (the base stores a).
    @pre(sub_class_method_pre)
    @pre(sub_class_method_pre2)
    @post(sub_class_method_post)
    @post(sub_class_method_post2)
    @finitize_method(range(-1,10))
    def a_method(self, a):
        self.x = a+1
def test_inheritance():
    # Exercise contract inheritance: base and subclass contracts both apply.
    # (Python 2 source — print statements.)
    bet(TestSubClass).run()
    print "Individual Tests"
    explicit_success(TestSubClass, -1)
    explicit_fail(TestSubClass, 0)
    explicit_fail(TestSubClass, 1)
    explicit_success(TestSubClass, 2)
    explicit_success(TestSubClass, 3)
    explicit_success(TestSubClass, 4)
    explicit_fail(TestSubClass, 5)
    explicit_fail(TestSubClass, 6)
    explicit_success(TestSubClass, 7)
    explicit_fail(TestSubClass, 8)
    explicit_success(TestSubClass, 9)

def test_solo_composition():
    # Each helper checks a class that carries only one kind of contract.
    test_only_pre()
    test_only_post()
    test_only_inv()

class TestOnlyPre(object):
    @pre(sub_class_method_pre)
    @pre(sub_class_method_pre2)
    def a_method(self, a):
        self.x = a+1

def test_only_pre():
    explicit_fail(TestOnlyPre, 4)
    explicit_success(TestOnlyPre, 5)
    explicit_fail(TestOnlyPre, 7)

class TestOnlyPost(object):
    @post(sub_class_method_post)
    @post(sub_class_method_post2)
    def a_method(self, a):
        self.x = a+1

def test_only_post():
    explicit_fail(TestOnlyPost, 6)
    explicit_success(TestOnlyPost, 7)
    explicit_fail(TestOnlyPost, 8)

@inv(base_class_inv)
@inv(sub_class_inv)
class TestOnlyInv(object):
    def a_method(self, a):
        self.x = a+1

def test_only_inv():
    # x becomes a+1, so invariants veto a=0 (x=1) and a=1 (x=2).
    explicit_success(TestOnlyInv, -1)
    explicit_fail(TestOnlyInv, 0)
    explicit_fail(TestOnlyInv, 1)
    explicit_success(TestOnlyInv, 2)
def explicit_fail(class_, val):
    """Assert that calling a_method(val) on a fresh instance violates a contract."""
    t = class_()
    try:
        t.a_method(val)
        assert False, str(val) + " worked, should have failed"
    except ContractViolation as cv:
        assert True

def explicit_success(class_, val):
    """Assert that calling a_method(val) on a fresh instance satisfies all contracts."""
    t = class_()
    try:
        t.a_method(val)
        assert True
    except ContractViolation as cv:
        assert False, str(val) + " failed, should have worked: " + str(cv)

# Exception fixtures for the @throws tests below.
class GoodException(Exception):
    pass

class BadException(Exception):
    pass

# Subclass of GoodException: allowed wherever GoodException is allowed.
class AnotherGoodException(GoodException):
    pass

class ADifferentGoodException(Exception):
    pass
class ThrowsTestClass(object):
    """Declares two allowed exception types via stacked @throws decorators."""
    @throws(ADifferentGoodException)
    @throws(GoodException)
    def do_something(self, x):
        if x==1:
            # allowed
            raise GoodException()
        if x==2:
            # allowed (subclass of GoodException)
            raise AnotherGoodException()
        if x==3:
            # allowed
            raise ADifferentGoodException()
        # not allowed
        raise BadException()

@dbc
class ThrowsTestSubClass(ThrowsTestClass):
    """Narrows the throws clause: only AnotherGoodException remains allowed."""
    @throws(AnotherGoodException)
    def do_something(self, x):
        if x==1:
            # not allowed
            raise GoodException()
        if x==2:
            # allowed
            raise AnotherGoodException()
        if x==3:
            # not allowed
            raise ADifferentGoodException()
        # not allowed
        raise BadException()

@dbc
class ThrowsTestSubSubClass(ThrowsTestSubClass):
    """No new @throws: inherits the narrowed clause from the parent."""
    def do_something(self, x):
        if x==1:
            # not allowed
            raise GoodException()
        if x==2:
            # allowed
            raise AnotherGoodException()
        if x==3:
            # not allowed
            raise ADifferentGoodException()
        # not allowed
        raise BadException()
def test_throws():
    # Disallowed exceptions are expected to surface as ThrowsViolation;
    # allowed ones propagate unchanged. (Python 2 source — print statements.)
    try:
        ThrowsTestClass().do_something(1)
    except GoodException:
        print "GoodException worked"
    try:
        ThrowsTestClass().do_something(2)
    except GoodException:
        print "GoodException worked"
    try:
        ThrowsTestClass().do_something(3)
    except ADifferentGoodException:
        print "ADifferentGoodException worked"
    try:
        ThrowsTestClass().do_something(4)
    except ThrowsViolation:
        print "Translating BadException to ThrowsViolation worked"
    try:
        ThrowsTestSubClass().do_something(1)
    except ThrowsViolation:
        print "Translating GoodException to ThrowsViolation on subclass worked"
    try:
        ThrowsTestSubClass().do_something(2)
    except GoodException:
        print "GoodException worked"
    try:
        ThrowsTestSubClass().do_something(3)
    except ThrowsViolation:
        print "Translating ADifferentGoodException worked"
    try:
        ThrowsTestSubClass().do_something(4)
    except ThrowsViolation:
        print "Translating BadException to ThrowsViolation worked"
    try:
        ThrowsTestSubSubClass().do_something(1)
    except ThrowsViolation:
        print "Translating GoodException to ThrowsViolation on subsubclass worked"
    try:
        ThrowsTestSubSubClass().do_something(2)
    except GoodException:
        print "GoodException worked"
    try:
        ThrowsTestSubSubClass().do_something(3)
    except ThrowsViolation:
        print "Translating ADifferentGoodException worked"
    try:
        ThrowsTestSubSubClass().do_something(4)
    except ThrowsViolation:
        print "Translating BadException to ThrowsViolation worked"

if __name__ == "__main__":
    test_inheritance()
    test_throws()
    test_bet()
    test_solo_composition()
|
mit
| 8,137,722,210,013,781,000
| 24.633898
| 121
| 0.644406
| false
| 3.449818
| true
| false
| false
|
espdev/readthedocs.org
|
readthedocs/restapi/views/search_views.py
|
1
|
4951
|
import logging
from rest_framework import decorators, permissions, status
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project, ProjectRelationship
from readthedocs.search.lib import search_file, search_project, search_section
from readthedocs.restapi import utils
log = logging.getLogger(__name__)
@decorators.api_view(['POST'])
@decorators.permission_classes((permissions.IsAdminUser,))
@decorators.renderer_classes((JSONRenderer,))
def index_search(request):
    """Add things to the search index.

    Admin-only endpoint. Expects ``request.DATA['data']`` with
    ``version_pk``, ``page_list`` and an optional ``commit``.
    """
    data = request.DATA['data']
    version_pk = data['version_pk']
    commit = data.get('commit')
    version = Version.objects.get(pk=version_pk)

    # Fixed relevance weights for the indexed documents.
    project_scale = 1
    page_scale = 1

    utils.index_search_request(
        version=version, page_list=data['page_list'], commit=commit,
        project_scale=project_scale, page_scale=page_scale)
    return Response({'indexed': True})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer,))
def search(request):
    """Perform search, supplement links by resolving project domains.

    GET args: ``project`` (required slug), ``q`` (required query),
    ``version`` (optional, defaults to LATEST).
    """
    project_slug = request.GET.get('project', None)
    version_slug = request.GET.get('version', LATEST)
    query = request.GET.get('q', None)
    if project_slug is None or query is None:
        return Response({'error': 'Need project and q'},
                        status=status.HTTP_400_BAD_REQUEST)
    try:
        project = Project.objects.get(slug=project_slug)
    except Project.DoesNotExist:
        return Response({'error': 'Project not found'},
                        status=status.HTTP_404_NOT_FOUND)
    log.debug("(API Search) %s", query)
    results = search_file(request=request, project_slug=project_slug,
                          version_slug=version_slug, query=query)

    # Supplement result paths with domain information on project
    hits = results.get('hits', {}).get('hits', [])
    for (n, hit) in enumerate(hits):
        fields = hit.get('fields', {})
        # NOTE(review): this local shadows the imported search_project()
        # function; harmless here but worth renaming.
        search_project = fields.get('project')[0]
        search_version = fields.get('version')[0]
        path = fields.get('path')[0]
        canonical_url = project.get_docs_url(version_slug=version_slug)
        if search_project != project_slug:
            # Hit belongs to a subproject: link into its own docs domain.
            try:
                subproject = project.subprojects.get(child__slug=search_project)
                canonical_url = subproject.child.get_docs_url(
                    version_slug=search_version
                )
            except ProjectRelationship.DoesNotExist:
                pass
        results['hits']['hits'][n]['fields']['link'] = (
            canonical_url + path
        )
    return Response({'results': results})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer,))
def project_search(request):
    """Search across projects by name; requires the ``q`` GET arg."""
    query = request.GET.get('q', None)
    if query is None:
        return Response({'error': 'Need project and q'}, status=status.HTTP_400_BAD_REQUEST)
    log.debug("(API Project Search) %s", (query))
    results = search_project(request=request, query=query)
    return Response({'results': results})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer,))
def section_search(request):
    """Section search

    Queries with query ``q`` across all documents and projects. Queries can be
    limited to a single project or version by using the ``project`` and
    ``version`` GET arguments in your request.

    When you search, you will have a ``project`` facet, which includes the
    number of matching sections per project. When you search inside a project,
    the ``path`` facet will show the number of matching sections per page.

    Possible GET args
    -----------------

    q **(required)**
        The query string **Required**

    project
        A project slug

    version
        A version slug

    path
        A file path slug

    Example::

        GET /api/v2/search/section/?q=virtualenv&project=django

    """
    query = request.GET.get('q', None)
    if not query:
        return Response(
            {'error': 'Search term required. Use the "q" GET arg to search. '},
            status=status.HTTP_400_BAD_REQUEST)
    project_slug = request.GET.get('project', None)
    version_slug = request.GET.get('version', LATEST)
    path = request.GET.get('path', None)
    log.debug("(API Section Search) [%s:%s] %s", project_slug, version_slug,
              query)
    results = search_section(
        request=request,
        query=query,
        project_slug=project_slug,
        version_slug=version_slug,
        path=path,
    )
    return Response({'results': results})
|
mit
| 4,044,174,051,605,192,000
| 33.381944
| 92
| 0.654211
| false
| 4.088357
| false
| false
| false
|
zerothi/sids
|
sisl/utils/ranges.py
|
1
|
8369
|
import re
from itertools import groupby
from numpy import zeros, ones, cumsum, take, int32, int64
from numpy import asarray
__all__ = ["strmap", "strseq", "lstranges", "erange", "list2str", "fileindex"]
__all__ += ["array_arange"]
# Function to change a string to a range of integers
def strmap(func, s, start=None, end=None, sep="b"):
    """ Parse a string as though it was a slice and map all entries using ``func``.

    Parameters
    ----------
    func : function
       function to parse every match with
    s : str
       the string that should be parsed
    start : optional
       the replacement in case the LHS of the delimiter is not present
    end : optional
       the replacement in case the RHS of the delimiter is not present
    sep : {"b", "c"}
       separator used, ``"b"`` is square brackets, ``"c"``, curly braces

    Examples
    --------
    >>> strmap(int, "1")
    [1]
    >>> strmap(int, "1-2")
    [(1, 2)]
    >>> strmap(int, "1-")
    [(1, None)]
    >>> strmap(int, "1-", end=4)
    [(1, 4)]
    >>> strmap(int, "1-10[2-3]")
    [((1, 10), [(2, 3)])]
    """
    # The regex's three alternatives classify each comma-segment as
    # [..][..]  |  ..[..]  |  plain sequence.
    if sep == "b":
        segment = re.compile(r"\[(.+)\]\[(.+)\]|(.+)\[(.+)\]|(.+)")
        sep1, sep2 = "[", "]"
    elif sep == "c":
        segment = re.compile(r"\{(.+)\}\{(.+)\}|(.+)\{(.+)\}|(.+)")
        sep1, sep2 = "{", "}"
    else:
        raise ValueError("strmap: unknown separator for the sequence")

    # Create list
    s = s.replace(" ", "")
    if len(s) == 0:
        return [None]
    elif s in ["-", ":"]:
        return [(start, end)]

    commas = s.split(",")

    # Collect all the comma separated quantities that
    # may be selected by [..,..] — re-join splits that happened inside
    # an (unbalanced) bracket group.
    i = 0
    while i < len(commas) - 1:
        if commas[i].count(sep1) == commas[i].count(sep2):
            i = i + 1
        else:
            # there must be more [ than ]
            commas[i] = commas[i] + "," + commas[i+1]
            del commas[i+1]

    # Check the last input...
    i = len(commas) - 1
    if commas[i].count(sep1) != commas[i].count(sep2):
        raise ValueError(f"Unbalanced string: not enough {sep1} and {sep2}")

    # Now we have a comma-separated list
    # with collected brackets.
    l = []
    for seg in commas:
        # Split it in groups of reg-exps
        m = segment.findall(seg)[0]
        if len(m[0]) > 0:
            # this is: [..][..] — recurse on both halves
            rhs = strmap(func, m[1], start, end, sep)
            for el in strmap(func, m[0], start, end, sep):
                l.append((el, rhs))
        elif len(m[2]) > 0:
            # this is: ..[..] — head is a sequence, bracket part recurses
            l.append((strseq(func, m[2], start, end),
                      strmap(func, m[3], start, end, sep)))
        elif len(m[4]) > 0:
            # plain sequence without brackets
            l.append(strseq(func, m[4], start, end))
    return l
def strseq(cast, s, start=None, end=None):
    """ Accept a string and return the casted tuples of content based on ranges.

    Parameters
    ----------
    cast : function
       parser of the individual elements
    s : str
       string with content
    start : optional
       value substituted for a missing left-hand end
    end : optional
       value substituted for a missing right-hand end

    Examples
    --------
    >>> strseq(int, "3")
    3
    >>> strseq(int, "3-6")
    (3, 6)
    >>> strseq(int, "3-")
    (3, None)
    >>> strseq(int, "3:2:7")
    (3, 2, 7)
    >>> strseq(int, "3:2:", end=8)
    (3, 2, 8)
    >>> strseq(int, ":2:", start=2)
    (2, 2, None)
    >>> strseq(float, "3.2:6.3")
    (3.2, 6.3)
    """
    # ":" takes precedence over "-" as the range delimiter.
    if ":" in s:
        pieces = [part.strip() for part in s.split(":")]
    elif "-" in s:
        pieces = [part.strip() for part in s.split("-")]
    else:
        # No delimiter: a single casted value.
        return cast(s)
    # Empty ends inherit the supplied defaults (which may stay None).
    if not pieces[0]:
        pieces[0] = start
    if not pieces[-1]:
        pieces[-1] = end
    return tuple(None if part is None else cast(part) for part in pieces)
def erange(start, step, end=None):
    """ Returns the range with both ends included.

    With two arguments the second is the (inclusive) stop; with three the
    middle argument is the stride: ``erange(a, s, b)`` == ``range(a, b+1, s)``.
    """
    if end is None:
        stop, stride = step, 1
    else:
        stop, stride = end, step
    return range(start, stop + 1, stride)
def lstranges(lst, cast=erange, end=None):
    """ Convert a `strmap` list into expanded ranges.

    Recursively walks the nested tuples/lists produced by `strmap` and
    expands each (start, stop[, step]) tuple through *cast* (inclusive
    `erange` by default).  A bare ``None`` leaf expands to ``cast(0, end)``
    when *end* is given.
    """
    l = []
    # If an entry is a tuple, it means it is either
    #  a range 0-1 == tuple(0, 1), or
    #  a sub-range
    #    0[0-1], 0-1[0-1]
    if isinstance(lst, tuple):
        if len(lst) == 3:
            # (start, step, stop) slice-style tuple
            l.extend(cast(*lst))
        else:
            head = lstranges(lst[0], cast, end)
            bot = lstranges(lst[1], cast, end)
            if isinstance(head, list):
                # head expanded to several values: pair each with the sub-range
                for el in head:
                    l.append([el, bot])
            elif isinstance(bot, list):
                l.append([head, bot])
            else:
                l.extend(cast(head, bot))
    elif isinstance(lst, list):
        for lt in lst:
            ls = lstranges(lt, cast, end)
            if isinstance(ls, list):
                l.extend(ls)
            else:
                l.append(ls)
    else:
        # Leaf value: None means "everything up to end" when end is known.
        if lst is None and end is not None:
            return cast(0, end)
        return lst
    return l
def list2str(lst):
    """ Convert a list of elements into a string of ranges.

    The input is sorted (the caller's list is left untouched) and
    consecutive runs are collapsed into ``first-last`` spans.

    Examples
    --------
    >>> list2str([2, 4, 5, 6])
    "2, 4-6"
    >>> list2str([2, 4, 5, 6, 8, 9])
    "2, 4-6, 8-9"
    """
    ordered = sorted(lst)
    # Consecutive values share the same (value - index) offset, so runs of
    # equal offsets mark the ranges to collapse.
    offsets = [value - idx for idx, value in enumerate(ordered)]
    pieces = []
    pos = 0
    for _, run in groupby(offsets):
        length = len(list(run))
        first = ordered[pos]
        pos += length
        if length == 1:
            pieces.append(str(first))
        else:
            pieces.append("{}-{}".format(first, first + length - 1))
    return ", ".join(pieces)
# Function to retrieve an optional index from the
# filename
#   file[0] returns:
#     file, 0
#   file returns:
#     file, None
#   file[0-1] returns
#     file, [0,1]
def fileindex(f, cast=int):
    """ Parses a filename string into the filename and the indices.

    This range can be formatted like this:
      file[1,2,3-6]
    in which case it will return:
      file, [1,2,3,4,5,6]

    Parameters
    ----------
    f : str
       filename to parse
    cast : function
       the function to cast the bracketed value

    Examples
    --------
    >>> fileindex("Hello[0]")
    ("Hello", 0)
    >>> fileindex("Hello[0-2]")
    ("Hello", [0, 1, 2])
    """
    if "[" not in f:
        return f, None

    # Grab the filename (everything before the first "[")
    f = f.split("[")
    fname = f.pop(0)
    # Re-join and remove the last "]"
    f = "[".join(f)
    if f[-1] == "]":
        f = f[:-1]
    # Delegate parsing/expansion to strmap + lstranges.
    ranges = strmap(cast, f)
    rng = lstranges(ranges)
    if len(rng) == 1:
        # Single index: unwrap the one-element list.
        return fname, rng[0]
    return fname, rng
def array_arange(start, end=None, n=None, dtype=int64):
    """ Creates a single array from a sequence of `numpy.arange`.

    Parameters
    ----------
    start : array_like
       a list of start elements for `numpy.arange`
    end : array_like
       a list of end elements (exclusive) for `numpy.arange`.
       This argument is not used if `n` is passed.
    n : array_like
       a list of counts of elements for `numpy.arange`.
       This is equivalent to ``end=start + n``.
    dtype : numpy.dtype
       the returned lists data-type

    Examples
    --------
    >>> array_arange([1, 5], [3, 6])
    array([1, 2, 5], dtype=int64)
    >>> array_arange([1, 6], [4, 9])
    array([1, 2, 3, 6, 7, 8], dtype=int64)
    >>> array_arange([1, 6], n=[2, 2])
    array([1, 2, 6, 7], dtype=int64)
    """
    # Loop-free algorithm: build an array of ones and patch the first slot of
    # every segment so that one cumulative sum reproduces all aranges
    # back-to-back.
    if n is None:
        counts = asarray(end) - asarray(start)
    else:
        counts = asarray(n)

    # Zero-length segments contribute nothing; drop them up front.
    keep = counts.nonzero()[0]
    if keep.size == 0:
        return zeros(0, dtype=dtype)
    starts = take(start, keep)
    counts = take(counts, keep)

    out = ones(counts.sum(), dtype=dtype)
    out[0] = starts[0]
    # Index of the first element of each segment after the first.
    seg_ptr = cumsum(counts[:-1])
    # Jump from the previous segment's last value to the next start:
    # start[i] - (start[i-1] + counts[i-1] - 1).
    out[seg_ptr] = starts[1:] - starts[:-1] - counts[:-1] + 1
    return cumsum(out, dtype=dtype)
|
lgpl-3.0
| -7,897,043,921,294,780,000
| 25.400631
| 83
| 0.50699
| false
| 3.29618
| false
| false
| false
|
wowref/wowref.com
|
wowref/wotlk/dbc/lib/dbcfile.py
|
1
|
3843
|
#!/usr/bin/env python
import os
from struct import Struct
from .dtypes import *
UNICODE_BLANK = ''
class DBCRecord(object):
    """A simple object to convert a dict to an object.

    Wraps the field dict produced by ``DBCFile.__process_record`` and exposes
    it through both item access (``rec['x']``) and attribute access
    (``rec.x``); byte values are decoded to UTF-8 on attribute access.
    """

    def __init__(self, d=None):
        # Raw field mapping for this record.
        self.data = d

    def __repr__(self):
        return "<DBCRecord %r>" % self.data

    def __getitem__(self, item):
        # Bug fix: removed a leftover debug print('hi') that wrote to stdout
        # on every indexed access.
        return self.data[item]

    def __getattr__(self, item):
        # Only invoked for names not found through normal lookup,
        # i.e. record field names.
        value = self.data[item]
        if isinstance(value, bytes):
            value = value.decode('utf-8')
        return value
class DBCFile(object):
    """Base representation of a DBC file.

    Iterating the instance streams DBCRecord objects from the file.  The
    record layout is described by ``self.skeleton`` (a sequence of dtypes
    from the sibling ``dtypes`` module); subclasses may predefine
    ``skeleton`` as a class attribute, otherwise it is taken from *skele*.
    """
    # File header: 4-byte 'WDBC' signature followed by four int32 counters
    # (records, fields, record_size, string_block_size).
    header_struct = Struct('4s4i')

    def __init__(self, filename, skele=None, verbose=False):
        # `verbose` is currently unused — kept for interface compatibility.
        self.filename = filename
        if not hasattr(self, 'skeleton'):
            self.skeleton = skele
        self.__create_struct()

    def __iter__(self):
        """Iterated based approach to the dbc reading.

        Yields one DBCRecord per row; the file is closed when iteration
        finishes or is abandoned.
        """
        if not os.path.exists(self.filename):
            raise Exception("File '%s' not found" % (self.filename,))
        f = open(self.filename, 'rb')
        f_read = f.read
        # Read in header
        sig, records, fields, record_size, string_block_size = \
            self.header_struct.unpack(f_read(20))
        # Check signature
        if sig != b'WDBC':
            f.close()
            raise Exception('Invalid file type')
        self.records = records
        self.fields = fields
        self.record_size = record_size
        self.string_block_size = string_block_size
        if not self.struct:
            # If the struct doesn't exist, create a default one:
            # all fields read as plain int32.
            self.skeleton = Array('data', Int32, fields)
            self.__create_struct()
        # Ensure that struct and record_size is the same
        if self.struct.size != record_size:
            f.close()
            raise Exception('Struct size mismatch (%d != %d)' %
                            (self.struct.size, record_size))
        struct_unpack = self.struct.unpack
        # Read in string block (located after all fixed-size records);
        # string fields store offsets into this block.
        f.seek(20 + records * record_size)
        self.string_block = f_read(string_block_size)
        f.seek(20)
        try:
            for i in range(records):
                yield self.__process_record(struct_unpack(f_read(record_size)))
        finally:
            f.close()

    def __create_struct(self):
        """Creates a Struct from the Skeleton (little-endian layout)."""
        if self.skeleton:
            s = ['<']
            for item in self.skeleton:
                if isinstance(item, Array):
                    # Each array element contributes its own format char.
                    s.extend(x.c for x in item.items)
                else:
                    s.append(item.c)
            self.struct = Struct(''.join(s))
        else:
            self.struct = None

    def __process_record(self, data):
        """Processes a record (row of data) into a DBCRecord.

        PadByte entries consume no output field; Array fields collect their
        non-pad elements into a list.
        """
        output = {}
        data_iter = iter(data)
        for field in self.skeleton:
            if isinstance(field, Array):
                output[field.name] = [
                    self.__process_field(item, next(data_iter)) for item in field.items
                    if not isinstance(item, PadByte)
                ]
            elif not isinstance(field, PadByte):
                output[field.name] = self.__process_field(field, next(data_iter))
        return DBCRecord(output)

    def __process_field(self, _type, data):
        """Resolve one raw field value; String fields are looked up in the
        NUL-terminated string block (offset 0 means the empty string)."""
        output = data
        if isinstance(_type, String):
            if data == 0:
                output = UNICODE_BLANK
            else:
                # A valid offset must point just past a NUL terminator.
                if data > self.string_block_size or self.string_block[data - 1] != 0:
                    raise Exception('Invalid string')
                output = self.string_block[data:self.string_block.find(0, data)]
                if isinstance(output, bytes):
                    output = output.decode('utf-8')
        return output
|
mit
| 2,913,251,282,306,833,000
| 30.243902
| 87
| 0.53604
| false
| 4.11016
| false
| false
| false
|
keith-lewis100/pont-workbench
|
main/data_models.py
|
1
|
7503
|
#_*_ coding: UTF-8 _*_
import logging
from google.appengine.api import users
from google.appengine.ext import ndb
import db
import mailer
import renderers
import urls
from role_types import RoleType
logger = logging.getLogger('model')
# Singleton workbench entity holding the shared reference-number counter.
workbench = db.WorkBench.get_or_insert('main')
# (code, display name) pairs for every committee known to the application.
committee_labels=[
    ('AMB', 'Ambulance'),
    ('PHC', 'PrimaryHealth'),
    ('SEC', 'SecondaryHealth'),
    ('LIV', 'Livelihoods'),
    ('ENG', 'Engineering'),
    ('EDU', 'Education'),
    ('CHU', 'Churches'),
    ('WEC', 'Wildlife Centre'),
    ('GEN', 'General')]
# Bootstrap: seed an initial user-admin account on a completely empty
# datastore (runs at import time on first deployment).
if db.User.query().count() == 0:
    user = db.User();
    user.name = 'Keith'
    user.email = 'keith.lewis@pont-mbale.org.uk'
    key = user.put()
    role = db.Role(parent=key)
    role.type_index = RoleType.USER_ADMIN
    role.committee = ''
    role.put()
class Committee:
    """Static stand-in that mimics the ndb key protocol for committees.

    Committees are configuration, not datastore entities, so the object
    doubles as its own key: kind/urlsafe/parent mirror ndb.Key methods.
    """

    def __init__(self, id, name):
        self.id = id
        self.name = name
        # Acts as its own key so generic entity code can treat it uniformly.
        self.key = self

    def kind(self):
        """Mirror ndb.Key.kind()."""
        return 'Committee'

    def urlsafe(self):
        """Mirror ndb.Key.urlsafe(); the short code is the stable id."""
        return self.id

    def parent(self):
        """Committees are top-level: there is no parent key."""
        return None
def get_committee_list():
    """Return a Committee wrapper for every configured committee."""
    return [Committee(code, label) for code, label in committee_labels]
def lookup_committee(c_id):
    """Find a committee by its short code; None when unknown."""
    return next(
        (Committee(code, label) for code, label in committee_labels
         if code == c_id),
        None)
def get_next_ref():
    """Increment, persist and return the shared reference counter.

    NOTE(review): not transactional -- concurrent requests could obtain the
    same reference; confirm whether this runs inside an ndb transaction.
    """
    workbench.last_ref_id += 1
    workbench.put()
    return workbench.last_ref_id
def lookup_entity(db_id):
    """Fetch the datastore entity addressed by a urlsafe key string."""
    return create_key(db_id).get()
def create_key(db_id):
    """Turn a urlsafe key string into an ndb.Key; None for empty input."""
    if db_id not in (None, ""):
        return ndb.Key(urlsafe=db_id)
    return None
def get_parent(entity):
    """Resolve an entity's logical parent.

    Prefer the datastore ancestor; a top-level Fund instead belongs to the
    static committee named by its 'committee' field.
    """
    ancestor = entity.key.parent()
    if ancestor is not None:
        return ancestor.get()
    if entity.key.kind() == 'Fund':
        return lookup_committee(entity.committee)
    return None
def lookup_user_by_email(email):
    """Return the stored User for *email*, or an unsaved placeholder User."""
    found = db.User.query(db.User.email == email).get()
    if found is not None:
        return found
    placeholder = db.User()
    placeholder.name = email
    return placeholder
def lookup_current_user():
    """Map the signed-in Google account to our User record."""
    return lookup_user_by_email(users.get_current_user().email())
def logout_url():
    """URL that signs the current user out and returns to the home page."""
    return users.create_logout_url('/')
def calculate_transferred_amount(payment):
    """Render a payment's value in both currencies once its transfer has a rate.

    Returns '' when the payment has no transfer yet, the transfer has no
    exchange rate, or the currency code is unrecognised.  (Previously an
    unknown currency raised NameError because neither amount was bound.)
    """
    if payment is None or payment.transfer is None:
        return ""
    transfer = payment.transfer.get()
    if transfer.exchange_rate is None:
        return ""
    requested_amount = payment.amount.value
    if payment.amount.currency == 'sterling':
        sterling = requested_amount
        shillings = int(requested_amount * transfer.exchange_rate)
    elif payment.amount.currency == 'ugx':
        sterling = int(requested_amount / transfer.exchange_rate)
        shillings = requested_amount
    else:
        # Unknown currency: behave like "no rate yet" rather than crashing.
        return ""
    return u"£{:,}".format(sterling) + "/" + u"{:,}".format(shillings) + ' Ush'
STATE_CLOSED = 0
def email_entity_creator(entity, user, message):
    """Notify the creator of *entity* that *user* changed its state.

    No-op when the entity has no creator field, or when the actor is the
    creator themselves.
    """
    if not hasattr(entity, 'creator'):
        return
    if user.key == entity.creator:
        # Use the module logger (was the root `logging` logger) for
        # consistency with the `logger = logging.getLogger('model')` above.
        logger.info('not sending email same user %s', user.name)
        return
    creator = entity.creator.get()
    entity_type = entity.key.kind()
    entity_ref = renderers.render_link(entity.name, urls.url_for_entity(entity, external=True))
    content = renderers.render_single_column((entity_type, entity_ref, message, user.name),
                    ('EntityType', 'Entity', 'Message', 'User'))
    mailer.send_email('Workbench Entity State Change', content, [creator.email])
class Model(object):
    """Per-request helper bundling the acted-on entity, committee context,
    the signed-in user, attached forms and accumulated errors.

    Views create one Model per request and drive entity CRUD and state
    transitions through it; audit records and notification emails are
    produced as side effects.
    """
    def __init__(self, entity, committee=None, table=None):
        # entity:    the ndb entity (possibly unsaved) this request acts on
        # committee: short committee code used to scope role checks
        # table:     the ndb model class, used for queries/uniqueness checks
        self.entity = entity
        self.committee = committee
        self.table = table
        self.user = lookup_current_user()
        self.forms = {}
        self.errors=[]
        self.next_entity = None
        self.entity_deleted = False
        self.show_closed = False
    def get_state(self):
        """State index of the entity; 0 (closed) when it has no state field."""
        return getattr(self.entity, 'state_index', 0)
    def user_has_role(self, role_type):
        """True when the current user holds *role_type* (committee-scoped
        for committee-admin roles)."""
        if self.user.key is None:
            # Placeholder (unsaved) user -> cannot hold any role.
            return False
        query = db.Role.query(ancestor=self.user.key).filter(db.Role.type_index==role_type)
        if role_type == RoleType.COMMITTEE_ADMIN:
            query = query.filter(db.Role.committee==self.committee)
        return query.iter().has_next()
    def lookup_users_with_role(self, role_type):
        """All users holding *role_type* (committee-scoped where relevant)."""
        query = db.Role.query(db.Role.type_index==role_type)
        if role_type == RoleType.COMMITTEE_ADMIN:
            query = query.filter(db.Role.committee==self.committee)
        # Role entities are children of their User; map back to the users.
        return query.map(lambda r: r.key.parent().get())
    def add_form(self, action_name, form):
        # Register the form backing a named action (create/update/...).
        self.forms[action_name] = form
    def get_form(self, action_name):
        return self.forms.get(action_name)
    def is_stateful(self):
        """Whether the underlying table tracks an open/closed state."""
        return hasattr(self.table, 'state_index')
    def apply_query(self, entity_query):
        """Fetch the query; for stateful tables, restrict to closed
        (state == 0) entities when show_closed is set, open ones otherwise."""
        if not hasattr(self.table, 'state_index'):
            return entity_query.fetch()
        if self.show_closed:
            return entity_query.filter(self.table.state_index == 0).fetch()
        else:
            return entity_query.filter(self.table.state_index > 0).fetch()
    def perform_create(self, action_name):
        """Validate the action's form, create and audit the entity.
        Returns False when validation or the uniqueness check fails."""
        form = self.get_form(action_name)
        if not form.validate():
            return False
        if not self.check_uniqueness(form):
            return False
        entity = self.entity
        form.populate_obj(entity)
        if hasattr(entity, 'creator'):
            entity.creator = self.user.key
        entity.put()
        self.audit(action_name, "Create performed")
        return True
    def check_uniqueness(self, form):
        """Reject a name already used by a sibling entity of the same kind;
        records the problem on the form's name field."""
        if not hasattr(form, 'name'):
            return True
        name = form.name.data
        if name == self.entity.name:
            # Unchanged name on update -> nothing to check.
            return True
        parent_key = None
        if self.entity.key:
            parent_key = self.entity.key.parent()
        existing = self.table.query(self.table.name == name, ancestor=parent_key).count(1)
        if existing > 0:
            form.name.errors = [ 'Entity named %s already exists' % name ]
            return False
        return True
    def perform_update(self, action_name):
        """Validate the action's form and persist changes to the entity."""
        form = self.get_form(action_name)
        if not form.validate():
            return False
        if not self.check_uniqueness(form):
            return False
        form.populate_obj(self.entity)
        self.entity.put()
        self.audit(action_name, "Update performed")
        return True
    def perform_close(self, action_name):
        """Move the entity to the closed state, audit and notify the creator."""
        self.entity.state_index = STATE_CLOSED
        self.entity.put()
        return self.email_and_audit(action_name, "%s performed" % action_name.title())
    def add_error(self, error_text):
        self.errors.append(error_text)
    def audit(self, action_name, message, entity=None, parent_key=None):
        """Write and return an AuditRecord for *entity* (defaults to the
        model's entity)."""
        if not entity:
            entity = self.entity
        audit = db.AuditRecord()
        audit.entity = entity.key
        audit.parent = parent_key
        audit.user = self.user.key
        audit.action = action_name
        audit.message = message
        audit.put()
        return audit
    def email_and_audit(self, action_name, message):
        """Audit the action, then email the entity's creator about it."""
        audit = self.audit(action_name, message)
        email_entity_creator(self.entity, self.user, message)
        return audit
    def __repr__(self):
        return 'Model(%s, %s)' % (repr(self.entity), self.committee)
|
mit
| 7,102,527,674,163,779,000
| 30.128631
| 95
| 0.603439
| false
| 3.643516
| false
| false
| false
|
josdejong/mathjs
|
test/benchmark/matrix_operations_python.py
|
1
|
4102
|
# install numpy
#
# sudo apt install python-pip
# pip install --user --upgrade pip
# pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose
#
import sys
import timeit
import numpy as np
print (sys.version_info)
# fiedler matrix 25 x 25
A = [
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
[ 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
[ 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22],
[ 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
[ 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
[ 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
[ 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18],
[ 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17],
[ 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[ 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
[11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
[12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
[16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8],
[17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6, 7],
[18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5, 6],
[19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4, 5],
[20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3, 4],
[21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2, 3],
[22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 2],
[23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1],
[24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
]
iterations = 10000
ms = 1000000
def add():
    """Element-wise A + A (benchmark kernel)."""
    return np.asarray(A) + A
def multiply():
    """Matrix product A @ A (benchmark kernel); dot == matmul for 2-D."""
    return np.dot(A, A)
def transpose():
    """Transpose of A (benchmark kernel)."""
    return np.asarray(A).T
def det():
    """Determinant of A (benchmark kernel)."""
    matrix = np.asarray(A)
    return np.linalg.det(matrix)
def _report(iters):
    """Time each operation and print its mean per-call duration in microseconds."""
    for label, op in (('Add', add), ('Multiply', multiply),
                      ('Transpose', transpose), ('Det', det)):
        duration = timeit.timeit(op, number=iters) * ms / iters
        print('{} duration {} microseconds'.format(label, duration))

_report(iterations)

# run again with more iterations to see whether we get the same sort of durations
iterations2 = iterations * 10
print('')
print('second round...')
_report(iterations2)
|
apache-2.0
| -5,856,528,032,766,062,000
| 57.6
| 115
| 0.478303
| false
| 1.95893
| false
| false
| false
|
Krissbro/LondonGaymers
|
cleverbot/cleverbot.py
|
1
|
2825
|
try:
    from cleverbot import Cleverbot as _Cleverbot
    # Old cleverbot releases expose API_URL on the class; this cog targets
    # the rewritten library, so treat those versions as unusable.
    # NOTE(review): the bare except below also hides unrelated import
    # failures -- presumably intentional (setup() reports the problem).
    if 'API_URL' in _Cleverbot.__dict__:
        _Cleverbot = False
except:
    _Cleverbot = False
from discord.ext import commands
from cogs.utils import checks
from .utils.dataIO import dataIO
import os
import discord
import asyncio
class Cleverbot():
    """Relay chat messages to the cleverbot.com API and reply in Discord."""
    def __init__(self, bot):
        self.bot = bot
        # _Cleverbot is the third-party client; setup() guarantees it is
        # usable before this cog is constructed.
        self.clv = _Cleverbot('Red-DiscordBot')
        self.settings = dataIO.load_json("data/cleverbot/settings.json")
    @commands.group(no_pm=True, invoke_without_command=True)
    async def cleverbot(self, *, message):
        """Talk with cleverbot"""
        result = await self.get_response(message)
        await self.bot.say(result)
    @cleverbot.command()
    @checks.is_owner()
    async def toggle(self):
        """Toggles reply on mention"""
        self.settings["TOGGLE"] = not self.settings["TOGGLE"]
        if self.settings["TOGGLE"]:
            await self.bot.say("I will reply on mention.")
        else:
            await self.bot.say("I won't reply on mention anymore.")
        dataIO.save_json("data/cleverbot/settings.json", self.settings)
    async def get_response(self, msg):
        """Ask cleverbot off the event loop; fall back after 10 seconds."""
        # clv.ask blocks on HTTP, so run it in an executor thread.
        question = self.bot.loop.run_in_executor(None, self.clv.ask, msg)
        try:
            answer = await asyncio.wait_for(question, timeout=10)
        except asyncio.TimeoutError:
            answer = "We'll talk later..."
        return answer
    async def on_message(self, message):
        """Reply when the bot is mentioned at the start of a public message."""
        if not self.settings["TOGGLE"] or message.channel.is_private:
            return
        if not self.bot.user_allowed(message):
            return
        # Ignore the bot's own messages to avoid replying to itself.
        if message.author.id != self.bot.user.id:
            mention = message.server.me.mention
            if message.content.startswith(mention):
                content = message.content.replace(mention, "").strip()
                await self.bot.send_typing(message.channel)
                response = await self.get_response(content)
                await self.bot.send_message(message.channel, response)
def check_folders():
    """Create the cog's data directory on first run."""
    if os.path.exists("data/cleverbot"):
        return
    print("Creating data/cleverbot folder...")
    os.makedirs("data/cleverbot")
def check_files():
    """Seed settings.json with defaults when it is missing or corrupt."""
    path = "data/cleverbot/settings.json"
    if not dataIO.is_valid_json(path):
        dataIO.save_json(path, {"TOGGLE": True})
def setup(bot):
    """Cog entry point; refuse to load without a usable cleverbot library."""
    if _Cleverbot is False:
        raise RuntimeError("Your cleverbot library is either missing or not "
                           "up to date. Please do\n"
                           "[p]debug bot.pip_install('cleverbot')\n"
                           "and restart Red once you get a response.\n"
                           "Then [p]load cleverbot")
    check_folders()
    check_files()
    bot.add_cog(Cleverbot(bot))
|
gpl-3.0
| 1,294,411,271,893,057,800
| 33.036145
| 77
| 0.603894
| false
| 3.678385
| false
| false
| false
|
Koodous/androguard-yara
|
download_androguard_report.py
|
1
|
2918
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2015. The Koodous Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import requests
import argparse
import json
__author__ = 'A.Sánchez <asanchez@koodous.com> && xgusix'
def download_report(sha256, auth, dst):
    """
    Download the Androguard report for *sha256* from Koodous and save it to *dst*.

    Returns True on success, False on any failure (no report yet, unknown
    APK, unexpected HTTP status, or write error), and None when no token
    was supplied.  Previously `rt` was left unbound for any status other
    than 404/200, so e.g. a 405 response raised UnboundLocalError.
    """
    if not auth:
        print("Please, provide your token!")
        return
    url = 'https://api.koodous.com/apks/{}/analysis'.format(sha256)
    response = requests.get(url=url,
                            headers={"Authorization": "Token {}".format(auth)})
    # Check if the APK is in the database
    if response.status_code == 405:
        print("Sorry, this APK does not have a report yet, you can request it "
              "via the Koodous website.")
    elif response.status_code == 404:
        print("Sorry, we don\'t have this APK in Koodous. You can share with "
              "the community through our website.")
    # Default to failure; only a successfully written 200 response flips it.
    rt = False
    if response.status_code == 200:
        rt = True
        data = response.json()
        try:
            # Close the file even when json.dump fails (was left open before).
            with open(dst, 'w') as fp:
                json.dump(data.get('androguard', None), fp)
            print("Report created in {}".format(dst))
        except Exception as e:
            print("There was an error writing the report: {}".format(e))
            rt = False
    return rt
def main():
    """Parse the command line and fetch the requested Androguard report."""
    parser = argparse.ArgumentParser(
        description="Tool to download reports from Koodous")
    parser.add_argument('-s', '--sha256', action='store',
                        dest='sha256')
    parser.add_argument('-o', '--output', action='store', dest='filename',
                        help=("File to dump the downloaded report, by default: "
                              "<sha256>-report.json"))
    parser.add_argument('-a', '--auth', action='store', dest='auth',
                        help=("Authorization token for Koodous API"))
    args = parser.parse_args()
    if not (args.sha256 and args.auth):
        print("I need at least a SHA256 hash and your Koodous API token!")
        parser.print_help()
        return
    report_name = args.filename if args.filename else "{}-report.json".format(args.sha256)
    if download_report(sha256=args.sha256, auth=args.auth, dst=report_name):
        print("Androguard report saved in {}".format(report_name))
if __name__ == '__main__':
main()
|
apache-2.0
| -8,956,185,248,688,753,000
| 31.411111
| 88
| 0.63387
| false
| 3.936572
| false
| false
| false
|
TUBvision/hrl
|
lib/graphics/graphics.py
|
1
|
12520
|
"""
This is the HRL submodule for handling graphics devices and OpenGL. Graphics
devices in HRL instantiate the 'Graphics' abstract class, which defines the
common functions required for displaying greyscale images.
Image presentation in HRL can be understood as a multi step process as follows:
Bitmap (The image written in an 8 bit, 4 channel format)
-> Greyscale Array (A numpy array of doubles between 0 and 1)
-> Processed Greyscale Array (A Gresycale Array remapped with a lookup table)
-> Display List (An index to a stored texture in graphical memory)
-> Texture (A python class instance which can be drawn)
i) The conversion of Bitmaps to Greyscale arrays is handled by functions in
'hrl.extra' Where possible, it is recommended to bypass this step and work
directly with numpy arrays.
ii) The conversion of Greyscale Arrays to Processed Greyscale Arrays is handled by
the base 'hrl' class, and consists primarily of gamma correction and contrast
range selection.
iii) Saving a Processed Greyscale Array into graphics memory and interacting
with it as a Texture object is handled in this module.
The 'Texture' class is a wrapper for certain OpenGL functions designed to
simplify the display of individual 2d images. The sole method of the Texture
class is 'draw'.
Texture objects are not meant to be created on their own, but are instead
created via the 'newTexture' method of Graphics. Graphics.newTexture will take
a given Processed Greyscale Array (with other optional arguments as well), and
return it as Texture object designed to be shown on the particular Graphics
object.
The openGL code was based largely on a great tutorial by a mysterious tutor
here: http://disruption.ca/gutil/introduction.html
"""
import OpenGL.GL as gl
import pygame as pg
import numpy as np
import abc
### Classes ###
## Graphics Class ##
class Graphics(object):
    """
    Abstract base class for graphics hardware backends.
    Subclasses implement greyToChannels, which maps a greyscale value in
    [0,1] to the (r,g,b,a) 4-tuple that renders that grey correctly on the
    specific hardware.
    """
    # Python 2 metaclass syntax.  NOTE(review): greyToChannels is not marked
    # @abc.abstractmethod, so subclasses are not actually forced to override.
    __metaclass__ = abc.ABCMeta
    # Abstract Methods #
    def greyToChannels(self,gry):
        """
        Convert one greyscale value (0..1) into the backend-specific
        4 colour channel representation.
        Parameters
        ----------
        gry : The grey value
        Returns
        -------
        (r,g,b,a) the grey represented as a corresponding 4-tuple
        """
        return
    # Concrete Methods #
    def __init__(self,w,h,bg,fs=False,db=True,lut=None):
        """
        Open the pygame/OpenGL window and set up a 2D, matrix-style
        coordinate system (origin top-left, y increasing downwards).
        Parameters
        ----------
        w : The width (in pixels) of the openGL window
        h : The height (in pixels) of the openGL window
        bg : The default background grey value (between 0 and 1)
        fs : Enable fullscreen display (Boolean) Default: False
        db : Enable double buffering (Boolean) Default: True
        lut : Optional path to a lookup-table file (two columns, one header
            row) used to invert the monitor's gamma function.
        """
        # Here we can add other options like fullscreen
        dbit = pg.OPENGL
        if db: dbit = dbit | pg.DOUBLEBUF
        if fs: dbit = dbit | pg.FULLSCREEN
        screen = pg.display.set_mode((w,h), dbit)
        pg.mouse.set_visible(False)
        # 2D drawing only: depth testing is unnecessary.
        gl.glDisable(gl.GL_DEPTH_TEST)
        # Set Matrix style coordinate system.
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity();
        gl.glOrtho(0,w,h,0,-1,1)
        gl.glMatrixMode(gl.GL_MODELVIEW)
        # Enable texturing
        gl.glEnable(gl.GL_TEXTURE_2D)
        # Enable blending
        gl.glEnable(gl.GL_BLEND)
        # Blend settings. Blending is unrelated to e.g. magnification.
        # Blending is how the colours from transluscent objects are
        # combined, and is therefore largely irrelevant.
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        # Gamma Function Correction
        self._lut = None
        self._gammainv = lambda x: x
        if lut != None:
            print "..using look-up table: %s" % lut
            self._lut = np.genfromtxt(lut,skip_header=1)
            # Interpolate into the LUT: column 0 = desired grey,
            # column 1 = value to send to the display.
            self._gammainv = lambda x: np.interp(x,self._lut[:,0],self._lut[:,1])
        # Here we change the default color
        self.changeBackground(bg)
        self.flip()
    def newTexture(self,grys0,shape='square'):
        """
        Build a Texture from a numpy array of greys between 0 and 1.
        NB: Images in HRL are represented in matrix style coordinates, i.e.
        the origin is the upper left corner, increasing right and downwards.
        Parameters
        ----------
        grys0 : The greyscale numpy array
        shape : The shape to 'cut out' of the given greyscale array. A square
            will render the entire array. Available: 'square', 'circle'
            Default: 'square'
        Returns
        -------
        Texture object
        """
        grys = np.flipud(grys0) # flipping up-down necessary
        grys = self._gammainv(grys) # added gamma correction
        # NOTE(review): grys[::-1,] flips vertically again; combined with
        # np.flipud above this looks redundant -- confirm the intended
        # orientation before touching it.
        byts = channelsToInt(self.greyToChannels(grys[::-1,])).tostring()
        wdth = len(grys[0])
        hght = len(grys[:,0])
        return Texture(byts,wdth,hght,shape)
    def flip(self,clr=True):
        """
        Swap in the image backbuffer. Draw a set of Textures, then call
        flip to display them all at once.
        Parameters
        ----------
        clr : Whether to clear the (new) back buffer after the flip, so the
            next frame starts from the background colour. Turning this off
            lets textures accumulate, which can help performance. Default: True
        """
        pg.display.flip()
        if clr: gl.glClear(gl.GL_COLOR_BUFFER_BIT)
    def changeBackground(self,bg):
        """
        Change the current background grey value and clear the buffer to it.
        Parameters
        ----------
        bg : The new gray value (between 0 and 1)
        """
        # glClearColor expects floats in [0,1]; channels are 8-bit values.
        mx = float(2**8-1)
        (r,g,b,a) = self.greyToChannels(self._gammainv(bg))
        gl.glClearColor(r/mx,g/mx,b/mx,a/mx)
        gl.glClear(gl.GL_COLOR_BUFFER_BIT)
## Texture Class ##
class Texture:
    """
    Wrapper around a compiled OpenGL texture plus a display list. Instances
    are created via Graphics.newTexture; the only public method is draw.
    """
    def __init__(self,byts,wdth,hght,shape):
        """
        Internal constructor -- use Graphics.newTexture instead.
        Parameters
        ----------
        byts : A bytestring representation of the greyscale array
        wdth : The width of the array
        hght : The height of the array
        shape : 'square' renders the whole array, 'circle' cuts a disc out
            of it; anything else raises NameError.
        """
        self._txid, self.wdth, self.hght = loadTexture(byts,wdth,hght)
        if shape == 'square':
            self._dlid = createSquareDL(self._txid,self.wdth,self.hght)
        elif shape == 'circle':
            self._dlid = createCircleDL(self._txid,self.wdth,self.hght)
        else:
            raise NameError('Invalid Shape')
    # Deliberately disabled: freeing GL resources from __del__ is unsafe
    # once the GL context is gone, so textures/display lists currently leak.
    # def __del__(self):
    #     if self._txid != None:
    #         deleteTexture(self._txid)
    #         self._txid = None
    #     if self._dlid != None:
    #         deleteTextureDL(self._dlid)
    #         self._dlid = None
    def draw(self,pos=None,sz=None,rot=0,rotc=None):
        """
        Load the Texture into the back buffer (Graphics.flip displays it),
        optionally translated, rotated and/or scaled first.
        Parameters
        ----------
        pos : (x,y) pixel position of the texture's upper-left corner in the
            Graphics window.
        sz : (width,height) in pixels; None keeps the natural size, which
            prevents any blending/interpolation of the image.
        rot : Rotation in degrees; may cause scaling/interpolation.
        rotc : Centre of the rotation; defaults to the texture's centre.
        Returns
        -------
        None
        """
        if pos:
            gl.glLoadIdentity()
            gl.glTranslate(pos[0],pos[1],0)
        if rot != 0:
            if rotc == None:
                rotc = (self.wdth / 2, self.hght / 2)
            (w,h) = rotc  # NOTE(review): w,h are never used below
            # Translate to the pivot, rotate, translate back.
            gl.glTranslate(rotc[0],rotc[1],0)
            gl.glRotate(rot,0,0,-1)
            gl.glTranslate(-rotc[0],-rotc[1],0)
        if sz:
            (wdth,hght) = sz
            gl.glScalef(wdth/(self.wdth*1.0), hght/(self.hght*1.0),1.0)
        gl.glCallList(self._dlid)
### Internal Functions ###
## OpenGL Texture Functions ##
def channelsToInt(channels):
    """
    Take a channel representation (r,g,b,a) and return the corresponding
    unsigned 32 bit int. Running the tostring method on a 2d array which has
    had this function applied to it will produce a bytestring appropriate
    for use as a texture with openGL.

    Rewritten without the Python-2-only tuple parameter syntax
    ``def channelsToInt((r,g,b,a))`` (removed by PEP 3113), so the module
    also parses under Python 3.  Call sites are unchanged: they still pass
    a single 4-tuple.
    """
    (r, g, b, a) = channels
    # Little-endian channel packing: R in the low byte, A in the high byte.
    R = 2**0
    G = 2**8
    B = 2**16
    A = 2**24
    return r*R + g*G + b*B + a*A
def loadTexture(byts,wdth,hght):
    """
    Load a bytestring representation of a Processed Greyscale array into
    OpenGL texture memory and return (texture_id, width, height).
    The minification and magnification filters are set to GL_LINEAR here;
    take great care when shrinking, blowing up, or rotating an image, as
    the resulting interpolations can affect experimental results.
    """
    txid = gl.glGenTextures(1)
    gl.glBindTexture(gl.GL_TEXTURE_2D, txid)
    # Linear interpolation for both magnification and minification.
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR)
    # Upload as 4-channel 8-bit RGBA, matching channelsToInt's packing.
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA, wdth, hght, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, byts)
    return txid,wdth,hght
def deleteTexture(txid):
    """
    Remove the texture *txid* from OpenGL texture memory.
    """
    gl.glDeleteTextures(txid)
## OpenGL Display List Functions ##
def createSquareDL(txid,wdth,hght):
    """
    Compile a display list that draws texture *txid* on a wdth x hght quad
    and return its id. A display list is a precompiled set of rendering
    instructions, which speeds up repeated image display.
    """
    dlid = gl.glGenLists(1)
    gl.glNewList(dlid,gl.GL_COMPILE)
    gl.glBindTexture(gl.GL_TEXTURE_2D, txid)
    gl.glBegin(gl.GL_QUADS)
    # Map the four texture corners onto the quad's corners.
    gl.glTexCoord2f(0, 0); gl.glVertex2f(0, 0)
    gl.glTexCoord2f(0, 1); gl.glVertex2f(0, hght)
    gl.glTexCoord2f(1, 1); gl.glVertex2f(wdth, hght)
    gl.glTexCoord2f(1, 0); gl.glVertex2f(wdth, 0)
    gl.glEnd()
    gl.glFinish()
    gl.glEndList()
    return dlid
def createCircleDL(txid,wdth,hght):
    """
    Compile a display list that draws texture *txid* as a triangle-fan disc
    with diameters (wdth, hght) and return its id. A display list is a
    precompiled set of rendering instructions, which speeds up display.
    """
    dlid = gl.glGenLists(1)
    gl.glNewList(dlid,gl.GL_COMPILE)
    gl.glBindTexture(gl.GL_TEXTURE_2D, txid)
    gl.glBegin(gl.GL_TRIANGLE_FAN)
    for ang in np.linspace(0,2*np.pi,360):
        # (x,y) traces the unit-diameter circle, so both coords lie in [-0.5,0.5].
        (x,y) = ((np.cos(ang))/2,(np.sin(ang))/2)
        # NOTE(review): texture coordinates here also span [-0.5,0.5] rather
        # than the usual [0,1]; confirm the wrapped sampling is intended.
        gl.glTexCoord2f(x, y); gl.glVertex2f(x*wdth,y*hght)
    gl.glEnd()
    gl.glFinish()
    gl.glEndList()
    return dlid
def deleteTextureDL(dlid):
    """
    Remove the display list *dlid* from OpenGL memory.
    """
    gl.glDeleteLists(dlid,1)
|
lgpl-2.1
| 7,395,943,921,647,267,000
| 32.655914
| 106
| 0.640575
| false
| 3.757503
| false
| false
| false
|
PeterDing/iScript
|
xiami.py
|
1
|
55901
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import re
import sys
from getpass import getpass
import os
import copy
import random
import time
import datetime
import json
import argparse
import requests
import urllib
import hashlib
import select
from mutagen.id3 import ID3,TRCK,TIT2,TALB,TPE1,APIC,TDRC,COMM,TPOS,USLT
from HTMLParser import HTMLParser
# Xiami page/endpoint URL templates.
url_song = "http://www.xiami.com/song/%s"
url_album = "http://www.xiami.com/album/%s"
url_collect = "http://www.xiami.com/collect/ajax-get-list"
url_artist_albums = "http://www.xiami.com/artist/album/id/%s/page/%s"
url_artist_top_song = "http://www.xiami.com/artist/top-%s"
url_lib_songs = "http://www.xiami.com/space/lib-song/u/%s/page/%s"
url_recent = "http://www.xiami.com/space/charts-recent/u/%s/page/%s"
# Personal radio: built from your favourite songs, favourite albums,
# liked artists and favourite collections.
url_radio_my = "http://www.xiami.com/radio/xml/type/4/id/%s"
# "Xiami guess": a personalised radio based on your listening history.
url_radio_c = "http://www.xiami.com/radio/xml/type/8/id/%s"
############################################################
# wget exit status
wget_es = {
0:"No problems occurred.",
2:"User interference.",
1<<8:"Generic error code.",
2<<8:"Parse error - for instance, when parsing command-line ' \
'optio.wgetrc or .netrc...",
3<<8:"File I/O error.",
4<<8:"Network failure.",
5<<8:"SSL verification failure.",
6<<8:"Username/password authentication failure.",
7<<8:"Protocol errors.",
8<<8:"Server issued an error response."
}
############################################################
parser = HTMLParser()
s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template
cookie_file = os.path.join(os.path.expanduser('~'), '.Xiami.cookies')
headers = {
"Accept":"text/html,application/xhtml+xml,application/xml; " \
"q=0.9,image/webp,*/*;q=0.8",
"Accept-Encoding":"text/html",
"Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2",
"Content-Type":"application/x-www-form-urlencoded",
"Referer":"http://www.xiami.com/",
"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"\
}
HEADERS2 = {
'pragma': 'no-cache',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
'accept': 'text/javascript, application/javascript, application/ecmascript, application/x-ecmascript, */*; q=0.01',
'cache-control': 'no-cache',
'authority': 'www.xiami.com',
'x-requested-with': 'XMLHttpRequest',
'referer': 'https://www.xiami.com/play?ids=/song/playlist/id/',
}
ss = requests.session()
ss.headers.update(headers)
############################################################
# Regular Expression Templates
re_disc_description = r'disc (\d+) \[(.+?)\]'
############################################################
def decry(row, encryed_url):
    """Undo Xiami's URL scrambling.

    The scrambled URL is a column-major transposition: the plaintext is
    written row by row into a matrix with *row* rows (the first
    ``len % rows`` rows get one extra column), then read out column by
    column.  This reverses the transposition, URL-unquotes the result and
    maps '^' back to '0'.

    NOTE: Python 2 only -- relies on integer '/', xrange and urllib.unquote.
    """
    url = encryed_url
    urllen = len(url)
    rows = int(row)
    cols_base = urllen / rows # basic column count
    rows_ex = urllen % rows # count of rows that have 1 more column
    matrix = []
    for r in xrange(rows):
        length = cols_base + 1 if r < rows_ex else cols_base
        matrix.append(url[:length])
        url = url[length:]
    url = ''
    # Read column-major: entry i comes from row (i % rows), column (i / rows).
    for i in xrange(urllen):
        url += matrix[i % rows][i / rows]
    return urllib.unquote(url).replace('^', '0')
def modificate_text(text):
    """Normalise a scraped title: unescape HTML entities, turn path
    separators into '-', collapse whitespace runs and strip the ends."""
    cleaned = parser.unescape(text)
    cleaned = re.sub(r'//*', '-', cleaned)
    cleaned = cleaned.replace('/', '-').replace('\\', '-')
    cleaned = re.sub(r'\s\s+', ' ', cleaned)
    return cleaned.strip()
def modificate_file_name_for_wget(file_name):
    """Sanitise a file name for FAT file systems and shell-quoted wget use."""
    # ':' (and surrounding whitespace) is illegal on FAT -> ' - '
    name = re.sub(r'\s*:\s*', u' - ', file_name)
    # '?' and '"' are illegal on FAT; '$' would be expanded by the shell (#7)
    return name.replace('?', '').replace('"', '\'').replace('$', '\\$')
def z_index(song_infos):
    """Digit width needed to zero-pad track numbers for this many songs."""
    return len(str(len(song_infos)))
########################################################
class Song(object):
    """Attribute bag for song metadata.

    Values live in a private dict instead of the instance __dict__, so
    reading a never-set attribute yields None rather than raising
    AttributeError.  Both attribute and item syntax are supported.
    """

    def __init__(self):
        self.__sure()
        # Sensible defaults for fields most call sites expect.
        self.track = 0
        self.year = 0
        self.cd_serial = 0
        self.disc_description = ''
        # z = len(str(album_size)) -- zero-padding width for track numbers
        self.z = 1

    def __sure(self):
        """Create the backing store on first use."""
        self.__dict__.setdefault('__keys', {})

    def __getattr__(self, name):
        # Only invoked for names absent from the instance/class, so unset
        # metadata reads as None.
        return self.__dict__['__keys'].get(name)

    def __setattr__(self, name, value):
        self.__dict__['__keys'][name] = value

    def __getitem__(self, key):
        return getattr(self, key)

    def __setitem__(self, key, value):
        return setattr(self, key, value)

    def feed(self, **kwargs):
        """Bulk-assign metadata fields."""
        for name, value in kwargs.items():
            setattr(self, name, value)
class XiamiH5API(object):
URL = 'http://api.xiami.com/web'
PARAMS = {
'v': '2.0',
'app_key': '1',
}
    def __init__(self):
        """Set up a requests session carrying the cookies Xiami expects.

        NOTE: Python 2 only -- hashlib.md5 is fed a str; Python 3 would
        require bytes.
        """
        self.cookies = {
            'user_from': '2',
            'XMPLAYER_addSongsToggler': '0',
            'XMPLAYER_isOpen': '0',
            # Pseudo-random token derived from the current time.
            '_xiamitoken': hashlib.md5(str(time.time())).hexdigest()
        }
        self.sess = requests.session()
        self.sess.cookies.update(self.cookies)
    def _request(self, url, method='GET', **kwargs):
        """Issue an HTTP request via the shared session.

        Any exception aborts the whole program -- this is a CLI tool, so
        network failures are treated as fatal.  (Python 2 except syntax.)
        """
        try:
            resp = self.sess.request(method, url, **kwargs)
        except Exception, err:
            print 'Error:', err
            sys.exit()
        return resp
def _make_params(self, **kwargs):
params = copy.deepcopy(self.PARAMS)
params.update(kwargs)
return params
    def song(self, song_id):
        """Fetch one song's metadata from the API and return a Song."""
        params = self._make_params(id=song_id, r='song/detail')
        url = self.URL
        resp = self._request(url, params=params, headers=headers)
        info = resp.json()['data']['song']
        # Strip the "_<size>" suffix to get the full-resolution cover.
        pic_url = re.sub('_\d+\.', '.', info['logo'])
        song = Song()
        song.feed(
            song_id=info['song_id'],
            song_name=info['song_name'],
            album_id=info['album_id'],
            album_name=info['album_name'],
            artist_id=info['artist_id'],
            artist_name=info['artist_name'],
            singers=info['singers'],
            album_pic_url=pic_url,
            comment='http://www.xiami.com/song/' + str(info['song_id'])
        )
        return song
def album(self, album_id):
url = self.URL
params = self._make_params(id=album_id, r='album/detail')
resp = self._request(url, params=params, headers=headers)
info = resp.json()['data']
songs = []
album_id=info['album_id'],
album_name=info['album_name'],
artist_id = info['artist_id']
artist_name = info['artist_name']
pic_url = re.sub('_\d+\.', '.', info['album_logo'])
for track, info_n in enumerate(info['songs'], 1):
song = Song()
song.feed(
song_id=info_n['song_id'],
song_name=info_n['song_name'],
album_id=album_id,
album_name=album_name,
artist_id=artist_id,
artist_name=artist_name,
singers=info_n['singers'],
album_pic_url=pic_url,
track=track,
comment='http://www.xiami.com/song/' + str(info_n['song_id'])
)
songs.append(song)
return songs
def collect(self, collect_id):
url = self.URL
params = self._make_params(id=collect_id, r='collect/detail')
resp = self._request(url, params=params, headers=headers)
info = resp.json()['data']
collect_name = info['collect_name']
collect_id = info['list_id']
songs = []
for info_n in info['songs']:
pic_url = re.sub('_\d+\.', '.', info['album_logo'])
song = Song()
song.feed(
song_id=info_n['song_id'],
song_name=info_n['song_name'],
album_id=info_n['album_id'],
album_name=info_n['album_name'],
artist_id=info_n['artist_id'],
artist_name=info_n['artist_name'],
singers=info_n['singers'],
album_pic_url=pic_url,
comment='http://www.xiami.com/song/' + str(info_n['song_id'])
)
songs.append(song)
return collect_id, collect_name, songs
def artist_top_songs(self, artist_id, page=1, limit=20):
url = self.URL
params = self._make_params(id=artist_id, page=page, limit=limit, r='artist/hot-songs')
resp = self._request(url, params=params, headers=headers)
info = resp.json()['data']
for info_n in info['songs']:
song_id = info_n['song_id']
yield self.song(song_id)
def search_songs(self, keywords, page=1, limit=20):
url = self.URL
params = self._make_params(key=keywords, page=page, limit=limit, r='search/songs')
resp = self._request(url, params=params, headers=headers)
info = resp.json()['data']
for info_n in info['songs']:
pic_url = re.sub('_\d+\.', '.', info['album_logo'])
song = Song()
song.feed(
song_id=info_n['song_id'],
song_name=info_n['song_name'],
album_id=info_n['album_id'],
album_name=info_n['album_name'],
artist_id=info_n['artist_id'],
artist_name=info_n['artist_name'],
singers=info_n['singer'],
album_pic_url=pic_url,
comment='http://www.xiami.com/song/' + str(info_n['song_id'])
)
yield song
def get_song_id(self, *song_sids):
song_ids = []
for song_sid in song_sids:
if isinstance(song_sid, int) or song_sid.isdigit():
song_ids.append(int(song_sid))
url = 'https://www.xiami.com/song/playlist/id/{}/cat/json'.format(song_sid)
resp = self._request(url, headers=headers)
info = resp.json()
song_id = int(str(info['data']['trackList'][0]['song_id']))
song_ids.append(song_id)
return song_ids
class XiamiWebAPI(object):
    """Client for the older playlist endpoint (xiami.com/song/playlist/...)
    which returns richer track data, including the scrambled file location.

    Relies on module-level helpers defined elsewhere in this file:
    decry() (location descrambler), parser (HTML entity unescaper),
    HEADERS2/headers, and the requests package.
    """
    URL = 'https://www.xiami.com/song/playlist/'
    def __init__(self):
        self.sess = requests.session()
    def _request(self, url, method='GET', **kwargs):
        """Issue one HTTP request; print the error and exit on failure."""
        try:
            resp = self.sess.request(method, url, **kwargs)
        except Exception, err:
            print 'Error:', err
            sys.exit()
        return resp
    def _make_song(self, info):
        """Build a Song from one trackList entry, descrambling the
        download URL held in the 'location' field."""
        song = Song()
        location=info['location']
        # first character of 'location' encodes the scramble row count;
        # the remainder is the scrambled URL
        row = location[0]
        encryed_url = location[1:]
        durl = decry(row, encryed_url)
        song.feed(
            song_id=info['song_id'],
            song_sub_title=info['song_sub_title'],
            songwriters=info['songwriters'],
            singers=info['singers'],
            song_name=parser.unescape(info['name']),
            album_id=info['album_id'],
            album_name=info['album_name'],
            artist_id=info['artist_id'],
            artist_name=info['artist_name'],
            composer=info['composer'],
            lyric_url='http:' + info['lyric_url'],
            track=info['track'],
            cd_serial=info['cd_serial'],
            album_pic_url='http:' + info['album_pic'],
            comment='http://www.xiami.com/song/' + str(info['song_id']),
            length=info['length'],
            play_count=info['playCount'],
            location=info['location'],
            location_url=durl
        )
        return song
    def _find_z(self, album):
        """Set song.z (track-number zero-pad width) per disc.

        The width of a disc is taken from its last track number;
        assumes cd_serial values run 1..N in order.
        """
        zs = []
        song = album[0]
        for i, song in enumerate(album[:-1]):
            next_song = album[i+1]
            cd_serial = song.cd_serial
            next_cd_serial = next_song.cd_serial
            if cd_serial != next_cd_serial:
                # disc boundary: record the width of the disc just ended
                z = len(str(song.track))
                zs.append(z)
        # width of the final (or only) disc
        z = len(str(song.track))
        zs.append(z)
        for song in album:
            song.z = zs[song.cd_serial - 1]
    def song(self, song_id):
        """Return one Song, or None when the id does not resolve."""
        url = self.URL + 'id/%s/cat/json' % song_id
        resp = self._request(url, headers=HEADERS2)
        # there is no song
        if not resp.json().get('data'):
            return None
        info = resp.json()['data']['trackList'][0]
        song = self._make_song(info)
        return song
    def songs(self, *song_ids):
        """Fetch several songs in one request ('%2C' is an url-encoded comma)."""
        url = self.URL + 'id/%s/cat/json' % '%2C'.join(song_ids)
        resp = self._request(url, headers=HEADERS2)
        # there is no song
        if not resp.json().get('data'):
            return None
        info = resp.json()['data']
        songs = []
        for info_n in info['trackList']:
            song = self._make_song(info_n)
            songs.append(song)
        return songs
    def album(self, album_id):
        """Return an album's tracks (with per-disc z widths), or None."""
        url = self.URL + 'id/%s/type/1/cat/json' % album_id
        resp = self._request(url, headers=HEADERS2)
        # there is no album
        if not resp.json().get('data'):
            return None
        info = resp.json()['data']
        songs = []
        for info_n in info['trackList']:
            song = self._make_song(info_n)
            songs.append(song)
        self._find_z(songs)
        return songs
    def collect(self, collect_id):
        """Return a collect's tracks.

        NOTE(review): unlike song()/album() there is no missing-'data'
        guard here, so an unknown id raises KeyError/TypeError.
        """
        url = self.URL + 'id/%s/type/3/cat/json' % collect_id
        resp = self._request(url, headers=HEADERS2)
        info = resp.json()['data']
        songs = []
        for info_n in info['trackList']:
            song = self._make_song(info_n)
            songs.append(song)
        return songs
    def search_songs(self, keywords):
        """Scrape the HTML search page for song ids, then batch-fetch them."""
        url = 'https://www.xiami.com/search?key=%s&_=%s' % (
            urllib.quote(keywords), int(time.time() * 1000))
        resp = self._request(url, headers=headers)
        html = resp.content
        song_ids = re.findall(r'song/(\w+)"', html)
        songs = self.songs(*song_ids)
        return songs
class xiami(object):
    """Top-level driver: login/session handling, URL dispatch, and the
    actual download/play loops.

    Heavily coupled to module globals defined elsewhere in this file:
    ss (requests session), args (argparse namespace), headers/HEADERS2,
    s (ANSI color formatter), cookie_file, wget_es, and the various
    url_* templates.
    """
    def __init__(self):
        self.dir_ = os.getcwdu()
        self.template_record = 'https://www.xiami.com/count/playrecord?sid={song_id}&ishq=1&t={time}&object_id={song_id}&object_name=default&start_point=120&_xiamitoken={token}'
        self.collect_id = ''
        self.album_id = ''
        self.artist_id = ''
        self.song_id = ''
        self.user_id = ''
        self.cover_id = ''
        self.cover_data = ''
        self.html = ''
        self.disc_description_archives = {}
        # in play mode download() is rebound to play(); otherwise the RHS
        # resolves to the class method, so the assignment is a no-op
        self.download = self.play if args.play else self.download
        self._is_play = bool(args.play)
        self._api = XiamiWebAPI()
    def init(self):
        """Load cookies from cookie_file and verify the session; exit if
        the file is missing or the cookies no longer authenticate."""
        if os.path.exists(cookie_file):
            try:
                cookies = json.load(open(cookie_file))
                ss.cookies.update(cookies.get('cookies', cookies))
                if not self.check_login():
                    print s % (1, 91, ' !! cookie is invalid, please login\n')
                    sys.exit(1)
            except:
                open(cookie_file, 'w').close()
                print s % (1, 97, ' please login')
                sys.exit(1)
        else:
            print s % (1, 91, ' !! cookie_file is missing, please login')
            sys.exit(1)
    def check_login(self):
        """Return True when the stored session is still authenticated."""
        #print s % (1, 97, '\n -- check_login')
        url = 'http://www.xiami.com/task/signin'
        r = self._request(url)
        if r.content:
            #print s % (1, 92, ' -- check_login success\n')
            # self.save_cookies()
            return True
        else:
            print s % (1, 91, ' -- login fail, please check email and password\n')
            return False
    def _request(self, url, headers=None, params=None, data=None, method='GET', timeout=30, retry=2):
        """HTTP request through the global session with simple retry.

        NOTE(review): a non-ok response raises immediately (it is not
        retried), and `raise err` at the end re-raises the last network
        exception only after all attempts raised.
        """
        for _ in range(retry):
            try:
                headers = headers or ss.headers
                resp = ss.request(method, url, headers=headers, params=params, data=data, timeout=timeout)
            except Exception, err:
                continue
            if not resp.ok:
                raise Exception("response is not ok, status_code = %s" % resp.status_code)
            # save cookies
            self.save_cookies()
            return resp
        raise err
    # manually, add cookies
    # you must know how to get the cookie
    def add_cookies(self, cookies):
        """Install a manually captured cookie string of the form
        'k1=v1; k2=v2' into the session and persist it."""
        _cookies = {}
        for item in cookies.strip('; ').split('; '):
            k, v = item.split('=', 1)
            _cookies[k] = v
        self.save_cookies(_cookies)
        ss.cookies.update(_cookies)
    def login(self, email, password):
        """Log in with email/password, retrying once with a captcha if
        the site demands one."""
        print s % (1, 97, '\n -- login')
        #validate = self.get_validate()
        data = {
            'email': email,
            'password': password,
            #'validate': validate,
            'remember': 1,
            'LoginButton': '登录'
        }
        hds = {
            'Origin': 'http://www.xiami.com',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-US,en;q=0.8',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 Safari/537.36',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Cache-Control': 'max-age=1',
            'Referer': 'http://www.xiami.com/web/login',
            'Connection': 'keep-alive',
            '_xiamitoken': hashlib.md5(str(time.time())).hexdigest()
        }
        url = 'https://login.xiami.com/web/login'
        for i in xrange(2):
            res = self._request(url, headers=hds, data=data)
            # member_auth cookie present means login succeeded
            if ss.cookies.get('member_auth'):
                return True
            else:
                if 'checkcode' not in res.content:
                    return False
                validate = self.get_validate(res.content)
                data['validate'] = validate
        return False
    # {{{ code from https://github.com/ly0/xiami-tools/blob/master/xiami.py
    def login_taobao(self, username, password):
        """Log in through the taobao/alipay SSO flow (captcha-aware).

        NOTE(review): the params dict below deliberately repeats several
        keys; in a Python dict literal the later value wins.
        """
        print s % (1, 97, '\n -- login taobao')
        p = {
            "lang": "zh_cn",
            "appName": "xiami",
            "appEntrance": "taobao",
            "cssLink": "",
            "styleType": "vertical",
            "bizParams": "",
            "notLoadSsoView": "",
            "notKeepLogin": "",
            "appName": "xiami",
            "appEntrance": "taobao",
            "cssLink": "https://h.alipayobjects.com/static/applogin/" \
                       "assets/login/mini-login-form-min.css",
            "styleType": "vertical",
            "bizParams": "",
            "notLoadSsoView": "true",
            "notKeepLogin": "true",
            "rnd": str(random.random()),
        }
        url = 'https://passport.alipay.com/mini_login.htm'
        r = ss.get(url, params=p, verify=True)
        cm = r.content
        # scrape the hidden form tokens out of the login page
        data = {
            "loginId": username,
            "password": password,
            "appName": "xiami",
            "appEntrance": "taobao",
            "hsid": re.search(r'"hsid" value="(.+?)"', cm).group(1),
            "cid": re.search(r'"cid" value="(.+?)"', cm).group(1),
            "rdsToken": re.search(r'"rdsToken" value="(.+?)"', cm).group(1),
            "umidToken": re.search(r'"umidToken" value="(.+?)"', cm).group(1),
            "_csrf_token": re.search(r'"_csrf_token" value="(.+?)"', cm).group(1),
            "checkCode": "",
        }
        url = 'https://passport.alipay.com/newlogin/login.do?fromSite=0'
        theaders = headers
        theaders['Referer'] = 'https://passport.alipay.com/mini_login.htm'
        while True:
            r = ss.post(url, data=data, headers=theaders, verify=True)
            j = r.json()
            if j['content']['status'] == -1:
                if 'titleMsg' not in j['content']['data']: continue
                err_msg = j['content']['data']['titleMsg']
                # captcha requested (or retyped wrongly): fetch the image,
                # ask the user, and retry
                if err_msg == u'请输入验证码' or err_msg == u'验证码错误,请重新输入':
                    captcha_url = 'http://pin.aliyun.com/get_img?' \
                        'identity=passport.alipay.com&sessionID=%s' % data['cid']
                    tr = self._request(captcha_url, headers=theaders)
                    path = os.path.join(os.path.expanduser('~'), 'vcode.jpg')
                    with open(path, 'w') as g:
                        img = tr.content
                        g.write(img)
                    print "  ++ 验证码已经保存至", s % (2, 91, path)
                    captcha = raw_input(
                        (s % (2, 92, '  ++ %s: ' % err_msg)).encode('utf8'))
                    data['checkCode'] = captcha
                    continue
            if not j['content']['data'].get('st'):
                print s % (2, 91, "  !! 输入的 username 或 password 有误.")
                sys.exit(1)
            url = 'http://www.xiami.com/accounts/back?st=%s' \
                % j['content']['data']['st']
            self._request(url, headers=theaders)
            self.save_cookies()
            return
    # }}}
    def get_validate(self, cn):
        """Download the captcha image referenced in page content `cn`
        and ask the user to type it in."""
        #url = 'https://login.xiami.com/coop/checkcode?forlogin=1&%s' \
            #% int(time.time())
        url = re.search(r'src="(http.+checkcode.+?)"', cn).group(1)
        path = os.path.join(os.path.expanduser('~'), 'vcode.png')
        with open(path, 'w') as g:
            data = self._request(url).content
            g.write(data)
        print "  ++ 验证码已经保存至", s % (2, 91, path)
        validate = raw_input(s % (2, 92, '  请输入验证码: '))
        return validate
    def save_cookies(self, cookies=None):
        """Persist the session cookies (or the given dict) to cookie_file."""
        if not cookies:
            cookies = ss.cookies.get_dict()
        with open(cookie_file, 'w') as g:
            json.dump(cookies, g)
    def get_durl(self, id_):
        """Resolve a song id to a descrambled download URL.

        Retries forever (with a 5s pause) on errors; returns None only
        when the response carries no location at all.
        """
        while True:
            try:
                if not args.low:
                    url = 'http://www.xiami.com/song/gethqsong/sid/%s'
                    j = self._request(url % id_).json()
                    t = j['location']
                else:
                    url = 'http://www.xiami.com/song/playlist/id/%s'
                    cn = self._request(url % id_).text
                    t = re.search(r'location>(.+?)</location', cn).group(1)
                if not t: return None
                # first char = scramble row count, rest = scrambled URL
                row = t[0]
                encryed_url = t[1:]
                durl = decry(row, encryed_url)
                return durl
            except Exception, e:
                print s % (1, 91, '   |-- Error, get_durl --'), e
                time.sleep(5)
    # FIXME, this request alway returns 405
    def record(self, song_id, album_id):
        """Report a play event to xiami (disabled: endpoint returns 405)."""
        return
        # token = ss.cookies.get('_xiamitoken', '')
        # t = int(time.time() * 1000)
        # self._request(self.template_record.format(
            # song_id=song_id, album_id=album_id, token=token, time=t))
    def get_cover(self, info):
        """Download the album cover bytes, cached per album_name.

        NOTE(review): if the server keeps answering with an XML error
        document this loops indefinitely.
        """
        if info['album_name'] == self.cover_id:
            return self.cover_data
        else:
            self.cover_id = info['album_name']
            while True:
                url = info['album_pic_url']
                try:
                    self.cover_data = self._request(url).content
                    # an XML payload here is an error page, not an image
                    if self.cover_data[:5] != '<?xml':
                        return self.cover_data
                except Exception, e:
                    print s % (1, 91, '   \\\n   \\-- Error, get_cover --'), e
                    time.sleep(5)
    def get_lyric(self, info):
        """Fetch the song's lyric and normalise it; return unicode text
        or None when there is no usable lyric."""
        def lyric_parser(data):
            # merge duplicate time tags of an LRC file into one sorted body
            # get ' ' from http://img.xiami.net/lyric/1_13772259457649.lrc
            if len(data) < 10:
                return None
            if re.search(r'\[\d\d:\d\d', data):
                title = ' title: %s\n' % info['song_name'].encode('utf8')
                album = ' album: %s\n' % info['album_name'].encode('utf8')
                artist = 'artist: %s\n' % info['artist_name'].encode('utf8')
                tdict = {}
                for line in data.split('\n'):
                    if re.search(r'^\[\d\d:', line):
                        cn = re.sub(r'\[\d{2}:\d{2}\.\d{2}\]', '', line)
                        time_tags = re.findall(r'\[\d{2}:\d{2}\.\d{2}\]', line)
                        for tag in time_tags: tdict[tag] = cn + '\n'
                time_tags = tdict.keys()
                time_tags.sort()
                data = ''.join([title, album, artist,
                                '\n------------------\n\n'] + \
                               [tdict[tag] for tag in time_tags])
                return data
            else:
                # for http://img.xiami.net/lyric/upload/19/1770983119_1356864643.lrc
                return data
        url = 'http://www.xiami.com/song/playlist/id/%s' % info['song_id']
        xml = self._request(url).content
        t = re.search('<lyric>(http.+?)</lyric>', xml)
        if not t: return None
        lyric_url = t.group(1)
        data = self._request(lyric_url).content.replace('\r\n', '\n')
        data = lyric_parser(data)
        if data:
            return data.decode('utf8', 'ignore')
        else:
            return None
    def get_disc_description(self, album_url, info):
        """Return u'(desc)' for this disc, scraping and caching the album
        page on first call; u'' when the disc has no description."""
        if not self.html:
            self.html = self._request(album_url).text
            t = re.findall(re_disc_description, self.html)
            t = dict([(a, modificate_text(parser.unescape(b))) \
                      for a, b in t])
            self.disc_description_archives = dict(t)
        if self.disc_description_archives.has_key(info['cd_serial']):
            disc_description = self.disc_description_archives[info['cd_serial']]
            return u'(%s)' % disc_description
        else:
            return u''
    def modified_id3(self, file_name, info):
        """Write ID3 tags (track, year, title, album, artist, disc,
        lyric, comment, cover) onto the downloaded mp3 via mutagen."""
        id3 = ID3()
        id3.add(TRCK(encoding=3, text=str(info['track'])))
        id3.add(TDRC(encoding=3, text=str(info['year'])))
        id3.add(TIT2(encoding=3, text=info['song_name']))
        id3.add(TALB(encoding=3, text=info['album_name']))
        id3.add(TPE1(encoding=3, text=info['artist_name']))
        id3.add(TPOS(encoding=3, text=str(info['cd_serial'])))
        lyric_data = self.get_lyric(info)
        id3.add(USLT(encoding=3, text=lyric_data)) if lyric_data else None
        #id3.add(TCOM(encoding=3, text=info['composer']))
        #id3.add(WXXX(encoding=3, desc=u'xiami_song_url', text=info['song_url']))
        #id3.add(TCON(encoding=3, text=u'genre'))
        #id3.add(TSST(encoding=3, text=info['sub_title']))
        #id3.add(TSRC(encoding=3, text=info['disc_code']))
        id3.add(COMM(encoding=3, desc=u'Comment', \
            text=info['comment']))
        id3.add(APIC(encoding=3, mime=u'image/jpeg', type=3, \
            desc=u'Front Cover', data=self.get_cover(info)))
        id3.save(file_name)
    def url_parser(self, urls):
        """Dispatch each xiami (or luoo.net) URL to the matching
        download routine, prompting the user where a URL is ambiguous."""
        for url in urls:
            if '/collect/' in url:
                self.collect_id = re.search(r'/collect/(\w+)', url).group(1)
                #print(s % (2, 92, u'\n  -- 正在分析精选集信息 ...'))
                self.download_collect()
            elif '/album/' in url:
                self.album_id = re.search(r'/album/(\w+)', url).group(1)
                #print(s % (2, 92, u'\n  -- 正在分析专辑信息 ...'))
                self.download_album()
            elif '/artist/' in url or 'i.xiami.com' in url:
                def get_artist_id(url):
                    html = self._request(url).text
                    artist_id = re.search(r'artist_id = \'(\w+)\'', html).group(1)
                    return artist_id
                self.artist_id = re.search(r'/artist/(\w+)', url).group(1) \
                    if '/artist/' in url else get_artist_id(url)
                code = raw_input(' >> a  # 艺术家所有专辑.\n' \
                    ' >> r  # 艺术家 radio\n' \
                    ' >> t  # 艺术家top 20歌曲.\n  >> ')
                if code == 'a':
                    #print(s % (2, 92, u'\n  -- 正在分析艺术家专辑信息 ...'))
                    self.download_artist_albums()
                elif code == 't':
                    #print(s % (2, 92, u'\n  -- 正在分析艺术家top20信息 ...'))
                    self.download_artist_top_20_songs()
                elif code == 'r':
                    self.download_artist_radio()
                else:
                    print(s % (1, 92, u'  --> Over'))
            elif '/song/' in url:
                self.song_id = re.search(r'/song/(\w+)', url).group(1)
                #print(s % (2, 92, u'\n  -- 正在分析歌曲信息 ...'))
                self.download_song()
            elif '/u/' in url:
                self.user_id = re.search(r'/u/(\w+)', url).group(1)
                code = raw_input(
                    ' >> m  # 该用户歌曲库.\n'
                    ' >> c  # 最近在听\n'
                    ' >> s  # 分享的音乐\n'
                    ' >> r  # 歌曲试听排行 - 一周\n'
                    ' >> rt  # 歌曲试听排行 - 全部 \n'
                    ' >> rm  # 私人电台:来源于"收藏的歌曲","收藏的专辑",'
                    ' "喜欢的艺人","收藏的精选集"\n'
                    ' >> rc  # 虾米猜:基于试听行为所建立的个性电台\n  >> ')
                if code == 'm':
                    #print(s % (2, 92, u'\n  -- 正在分析用户歌曲库信息 ...'))
                    self.download_user_songs(url_lib_songs, u'收藏的歌曲')
                elif code == 'c':
                    self.download_user_songs(url_recent, u'最近在听的歌曲')
                elif code == 's':
                    url_shares = 'http://www.xiami.com' \
                        '/space/feed/u/%s/type/3/page/%s' % (self.user_id, '%s')
                    self.download_user_shares(url_shares)
                elif code == 'r':
                    url = 'http://www.xiami.com/space/charts/u/%s/c/song/t/week' % self.user_id
                    self.download_ranking_songs(url, 'week')
                elif code == 'rt':
                    url = 'http://www.xiami.com/space/charts/u/%s/c/song/t/all' % self.user_id
                    self.download_ranking_songs(url, 'all')
                elif code == 'rm':
                    #print(s % (2, 92, u'\n  -- 正在分析该用户的虾米推荐 ...'))
                    url_rndsongs = url_radio_my
                    self.download_user_radio(url_rndsongs)
                elif code == 'rc':
                    url_rndsongs = url_radio_c
                    self.download_user_radio(url_rndsongs)
                else:
                    print(s % (1, 92, u'  --> Over'))
            elif '/chart/' in url:
                self.chart_id = re.search(r'/c/(\d+)', url).group(1) \
                    if '/c/' in url else 101
                type_ = re.search(r'/type/(\d+)', url).group(1) \
                    if '/type/' in url else 0
                self.download_chart(type_)
            elif '/genre/' in url:
                if '/gid/' in url:
                    self.genre_id = re.search(r'/gid/(\d+)', url).group(1)
                    url_genre = 'http://www.xiami.com' \
                        '/genre/songs/gid/%s/page/%s'
                elif '/sid/' in url:
                    self.genre_id = re.search(r'/sid/(\d+)', url).group(1)
                    url_genre = 'http://www.xiami.com' \
                        '/genre/songs/sid/%s/page/%s'
                else:
                    print s % (1, 91, '  !! Error: missing genre id at url')
                    sys.exit(1)
                code = raw_input(' >> t  # 风格推荐\n' \
                    ' >> r  # 风格radio\n  >> ')
                if code == 't':
                    self.download_genre(url_genre)
                elif code == 'r':
                    self.download_genre_radio(url_genre)
            elif 'luoo.net' in url:
                self.hack_luoo(url)
            elif 'sid=' in url:
                _mod = re.search(r'sid=([\w+,]+\w)', url)
                if _mod:
                    song_ids = _mod.group(1).split(',')
                    self.download_songs(song_ids)
            else:
                print s % (2, 91, u'   请正确输入虾米网址.')
    def make_file_name(self, song, cd_serial_auth=False):
        """Compose song['file_name'] as 'NN.title - artist.mp3', with a
        '[Disc-N ...]' prefix when the album spans multiple discs."""
        z = song['z']
        file_name = str(song['track']).zfill(z) + '.' \
            + song['song_name'] \
            + ' - ' + song['artist_name'] + '.mp3'
        if cd_serial_auth:
            song['file_name'] = ''.join([
                '[Disc-',
                str(song['cd_serial']),
                ' # ' + song['disc_description'] \
                    if song['disc_description'] else '', '] ',
                file_name])
        else:
            song['file_name'] = file_name
    def get_songs(self, album_id, song_id=None):
        """Return an album's songs (file names prepared); optionally
        filtered down to a single song_id."""
        songs = self._api.album(album_id)
        if not songs:
            return []
        # multi-disc albums get the '[Disc-N]' file-name prefix
        cd_serial_auth = int(songs[-1]['cd_serial']) > 1
        for song in songs:
            self.make_file_name(song, cd_serial_auth=cd_serial_auth)
        songs = [i for i in songs if i['song_id'] == song_id] \
            if song_id else songs
        return songs
    def get_song(self, song_id):
        """Return [song] for one id (file name prepared), or []."""
        song = self._api.song(song_id)
        if not song:
            return []
        self.make_file_name(song)
        return [song]
    def download_song(self):
        """Download (or play) the single song in self.song_id."""
        songs = self.get_song(self.song_id)
        print(s % (2, 97, u'\n  >> ' + u'1 首歌曲将要下载.')) \
            if not args.play else ''
        #self.song_infos = [song_info]
        self.download(songs)
    def download_songs(self, song_ids):
        """Download each id in song_ids, one request per song."""
        for song_id in song_ids:
            self.song_id = song_id
            songs = self.get_song(self.song_id)
            self.download(songs)
    def download_album(self):
        """Download the album in self.album_id into its own directory."""
        songs = self.get_songs(self.album_id)
        if not songs:
            return
        song = songs[0]
        d = song['album_name'] + ' - ' + song['artist_name']
        dir_ = os.path.join(os.getcwdu(), d)
        self.dir_ = modificate_file_name_for_wget(dir_)
        amount_songs = unicode(len(songs))
        # honor --from: skip the first (from_-1) tracks
        songs = songs[args.from_ - 1:]
        print(s % (2, 97, u'\n  >> ' + amount_songs + u' 首歌曲将要下载.')) \
            if not args.play else ''
        self.download(songs, amount_songs, args.from_)
    def download_collect(self):
        """Download a collect (paged id listing, then one song at a time)."""
        page = 1
        song_ids = []
        while True:
            params = {
                'id': self.collect_id,
                'p': page,
                'limit': 50,
            }
            infos = self._request(url_collect, params=params).json()
            for info in infos['result']['data']:
                song_ids.append(str(info['song_id']))
            if infos['result']['total_page'] == page:
                break
            page += 1
        # scrape the collect title off the HTML page
        html = self._request('http://www.xiami.com/collect/%s' % self.collect_id).text
        html = html.split('<div id="wall"')[0]
        collect_name = re.search(r'<title>(.+?)<', html).group(1)
        d = collect_name
        dir_ = os.path.join(os.getcwdu(), d)
        self.dir_ = modificate_file_name_for_wget(dir_)
        amount_songs = unicode(len(song_ids))
        song_ids = song_ids[args.from_ - 1:]
        print(s % (2, 97, u'\n  >> ' + amount_songs + u' 首歌曲将要下载.')) \
            if not args.play else ''
        n = args.from_
        for i in song_ids:
            songs = self.get_song(i)
            self.download(songs, amount_songs, n)
            # reset per-album caches between songs
            self.html = ''
            self.disc_description_archives = {}
            n += 1
    def download_artist_albums(self):
        """Download every album of self.artist_id, page by page, until a
        page repeats (no more albums)."""
        ii = 1
        album_ids = []
        while True:
            html = self._request(
                url_artist_albums % (self.artist_id, str(ii))).text
            t = re.findall(r'/album/(\w+)"', html)
            if album_ids == t: break
            album_ids = t
            if album_ids:
                for i in album_ids:
                    print '  ++ http://www.xiami.com/album/%s' % i
                    self.album_id = i
                    self.download_album()
                    self.html = ''
                    self.disc_description_archives = {}
            else:
                break
            ii += 1
    def download_artist_top_20_songs(self):
        """Download the artist's top-20 listing into 'name - top 20'."""
        html = self._request(url_artist_top_song % self.artist_id).text
        song_ids = re.findall(r'/music/send/id/(\d+)', html)
        artist_name = re.search(
            r'<p><a href="/artist/\w+">(.+?)<', html).group(1)
        d = modificate_text(artist_name + u' - top 20')
        dir_ = os.path.join(os.getcwdu(), d)
        self.dir_ = modificate_file_name_for_wget(dir_)
        amount_songs = unicode(len(song_ids))
        print(s % (2, 97, u'\n  >> ' + amount_songs + u' 首歌曲将要下载.')) \
            if not args.play else ''
        n = 1
        for i in song_ids:
            songs = self.get_song(i)
            self.download(songs, amount_songs, n)
            self.html = ''
            self.disc_description_archives = {}
            n += 1
    def download_artist_radio(self):
        """Endlessly stream the artist radio (refetches the XML feed)."""
        html = self._request(url_artist_top_song % self.artist_id).text
        artist_name = re.search(
            r'<p><a href="/artist/\w+">(.+?)<', html).group(1)
        d = modificate_text(artist_name + u' - radio')
        dir_ = os.path.join(os.getcwdu(), d)
        self.dir_ = modificate_file_name_for_wget(dir_)
        url_artist_radio = "http://www.xiami.com/radio/xml/type/5/id/%s" \
            % self.artist_id
        n = 1
        while True:
            xml = self._request(url_artist_radio).text
            song_ids = re.findall(r'<song_id>(\d+)', xml)
            for i in song_ids:
                songs = self.get_song(i)
                self.download(songs, n=n)
                self.html = ''
                self.disc_description_archives = {}
                n += 1
    def download_user_songs(self, url, desc):
        """Download a user's song listing (library / recent), paged.

        NOTE(review): `self.download(songs, n)` passes n positionally as
        amount_songs (cf. n=n elsewhere) -- looks unintended.
        """
        dir_ = os.path.join(os.getcwdu(),
                            u'虾米用户 %s %s' % (self.user_id, desc))
        self.dir_ = modificate_file_name_for_wget(dir_)
        ii = 1
        n = 1
        while True:
            html = self._request(url % (self.user_id, str(ii))).text
            song_ids = re.findall(r'/song/(.+?)"', html)
            if song_ids:
                for i in song_ids:
                    songs = self.get_song(i)
                    self.download(songs, n)
                    self.html = ''
                    self.disc_description_archives = {}
                    n += 1
            else:
                break
            ii += 1
    def download_user_shares(self, url_shares):
        """Download everything (albums and songs) a user has shared."""
        d = modificate_text(u'%s 的分享' % self.user_id)
        dir_ = os.path.join(os.getcwdu(), d)
        self.dir_ = modificate_file_name_for_wget(dir_)
        page = 1
        while True:
            html = self._request(url_shares % page).text
            shares = re.findall(r'play.*\(\'\d+\'\)', html)
            for share in shares:
                if 'album' in share:
                    self.album_id = re.search(r'\d+', share).group()
                    self.download_album()
                else:
                    self.song_id = re.search(r'\d+', share).group()
                    self.download_song()
            if not shares: break
            page += 1
    def download_ranking_songs(self, url, tp):
        """Download a user's listening chart ('week' or 'all'), paged."""
        d = modificate_text(u'%s 的试听排行 - %s' % (self.user_id, tp))
        dir_ = os.path.join(os.getcwdu(), d)
        self.dir_ = modificate_file_name_for_wget(dir_)
        page = 1
        n = 1
        while True:
            html = self._request(url + '/page/' + str(page)).text
            song_ids = re.findall(r"play\('(\d+)'", html)
            if not song_ids:
                break
            for song_id in song_ids:
                songs = self.get_song(song_id)
                self.download(songs, n=n)
                self.html = ''
                self.disc_description_archives = {}
                n += 1
            page += 1
    def download_user_radio(self, url_rndsongs):
        """Endlessly stream a user's personal radio feed."""
        d = modificate_text(u'%s 的虾米推荐' % self.user_id)
        dir_ = os.path.join(os.getcwdu(), d)
        self.dir_ = modificate_file_name_for_wget(dir_)
        n = 1
        while True:
            xml = self._request(url_rndsongs % self.user_id).text
            song_ids = re.findall(r'<song_id>(\d+)', xml)
            for i in song_ids:
                songs = self.get_song(i)
                self.download(songs, n=n)
                self.html = ''
                self.disc_description_archives = {}
                n += 1
    def download_chart(self, type_):
        """Download a chart listing (up to 200 songs)."""
        html = self._request('http://www.xiami.com/chart/index/c/%s' \
                             % self.chart_id).text
        title = re.search(r'<title>(.+?)</title>', html).group(1)
        d = modificate_text(title)
        dir_ = os.path.join(os.getcwdu(), d)
        self.dir_ = modificate_file_name_for_wget(dir_)
        html = self._request(
            'http://www.xiami.com/chart/data?c=%s&limit=200&type=%s' \
            % (self.chart_id, type_)).text
        song_ids = re.findall(r'/song/(\d+)', html)
        n = 1
        for i in song_ids:
            songs = self.get_song(i)
            self.download(songs, n=n)
            self.html = ''
            self.disc_description_archives = {}
            n += 1
    def download_genre(self, url_genre):
        """Download a genre's recommended songs, paged.

        NOTE(review): the page-advance request uses self.chart_id where
        every other line here uses self.genre_id -- verify.
        """
        html = self._request(url_genre % (self.genre_id, 1)).text
        if '/gid/' in url_genre:
            t = re.search(
                r'/genre/detail/gid/%s".+?title="(.+?)"' \
                % self.genre_id, html).group(1)
        elif '/sid/' in url_genre:
            t = re.search(
                r'/genre/detail/sid/%s" title="(.+?)"' \
                % self.genre_id, html).group(1)
        d = modificate_text(u'%s - 代表曲目 - xiami' % t)
        dir_ = os.path.join(os.getcwdu(), d)
        self.dir_ = modificate_file_name_for_wget(dir_)
        n = 1
        page = 2
        while True:
            song_ids = re.findall(r'/song/(\d+)', html)
            if not song_ids: break
            for i in song_ids:
                songs = self.get_song(i)
                self.download(songs, n=n)
                self.html = ''
                self.disc_description_archives = {}
                n += 1
            html = self._request(url_genre % (self.chart_id, page)).text
            page += 1
    def download_genre_radio(self, url_genre):
        """Endlessly stream a genre radio feed."""
        html = self._request(url_genre % (self.genre_id, 1)).text
        if '/gid/' in url_genre:
            t = re.search(
                r'/genre/detail/gid/%s".+?title="(.+?)"' \
                % self.genre_id, html).group(1)
            url_genre_radio = "http://www.xiami.com/radio/xml/type/12/id/%s" \
                % self.genre_id
        elif '/sid/' in url_genre:
            t = re.search(
                r'/genre/detail/sid/%s" title="(.+?)"' \
                % self.genre_id, html).group(1)
            url_genre_radio = "http://www.xiami.com/radio/xml/type/13/id/%s" \
                % self.genre_id
        d = modificate_text(u'%s - radio - xiami' % t)
        dir_ = os.path.join(os.getcwdu(), d)
        self.dir_ = modificate_file_name_for_wget(dir_)
        n = 1
        while True:
            xml = self._request(url_genre_radio).text
            song_ids = re.findall(r'<song_id>(\d+)', xml)
            for i in song_ids:
                songs = self.get_song(i)
                self.download(songs, n=n)
                self.html = ''
                self.disc_description_archives = {}
                n += 1
    def hack_luoo(self, url):
        """Scrape a luoo.net playlist page, then search each song on
        xiami and download the first match."""
        # parse luoo.net
        theaders = headers
        theaders.pop('Referer')
        r = requests.get(url)
        if not r.ok:
            return None
        cn = r.content
        songs_info = re.findall(r'<p class="name">(.+?)</p>\s+'
                                r'<p class="artist">(?:Artist:|艺人:)(.+?)</p>\s+'
                                r'<p class="album">(?:Album:|专辑:)(.+?)</p>', cn)
        # search song at xiami
        for name, artist, album in songs_info:
            name = name.strip()
            artist = artist.strip()
            album = album.strip()
            songs = self._api.search_songs(name + ' ' + artist)
            if not songs:
                print s % (1, 93, '  !! no find:'), ' - '.join([name, artist, album])
                continue
            self.make_file_name(songs[0])
            self.download(songs[:1], n=1)
    def display_infos(self, i, nn, n, durl):
        """Print one song's metadata block before playing it."""
        length = datetime.datetime.fromtimestamp(i['length']).strftime('%M:%S')
        print n, '/', nn
        print s % (2, 94, i['file_name'])
        print s % (2, 95, i['album_name'])
        print s % (2, 93, length)
        print 'http://www.xiami.com/song/%s' % i['song_id']
        print 'http://www.xiami.com/album/%s' % i['album_id']
        print durl
        if i['durl_is_H'] == 'h':
            print s % (1, 97, 'MP3-Quality:'), s % (1, 92, 'High')
        else:
            print s % (1, 97, 'MP3-Quality:'), s % (1, 91, 'Low')
        print '—' * int(os.popen('tput cols').read())
    def get_mp3_quality(self, durl):
        """Classify a download URL as high ('h') or low ('l') quality
        based on the hosts/suffixes xiami uses for 320k files."""
        if 'm3.file.xiami.com' in durl \
                or 'm6.file.xiami.com' in durl \
                or '_h.mp3' in durl \
                or 'm320.xiami.net' in durl:
            return 'h'
        else:
            return 'l'
    def play(self, songs, nn=u'1', n=1):
        """Stream songs through mpv; any keypress on stdin between
        tracks (1s window) quits."""
        if args.play == 2:
            # -pp: most-played first
            songs = sorted(songs, key=lambda k: k['play_count'], reverse=True)
        for i in songs:
            self.record(i['song_id'], i['album_id'])
            durl = self.get_durl(i['song_id'])
            if not durl:
                print s % (2, 91, '  !! Error: can\'t get durl'), i['song_name']
                continue
            cookies = '; '.join(['%s=%s' % (k, v) for k, v in ss.cookies.items()])
            mp3_quality = self.get_mp3_quality(durl)
            i['durl_is_H'] = mp3_quality
            self.display_infos(i, nn, n, durl)
            n = int(n) + 1
            cmd = 'mpv --really-quiet ' \
                '--cache 8146 ' \
                '--user-agent "%s" ' \
                '--http-header-fields "Referer: http://img.xiami.com' \
                '/static/swf/seiya/1.4/player.swf?v=%s",' \
                '"Cookie: %s" ' \
                '"%s"' \
                % (headers['User-Agent'], int(time.time()*1000), cookies, durl)
            os.system(cmd)
            timeout = 1
            ii, _, _ = select.select([sys.stdin], [], [], timeout)
            if ii:
                sys.exit(0)
            else:
                pass
    def download(self, songs, amount_songs=u'1', n=1):
        """Download songs with wget into self.dir_, then write ID3 tags.

        Existing files are skipped (re-tagged only under --undownload).
        NOTE(review): the Cookie header is built as 'member_auth=%s' but
        is fed the full 'k=v; ...' cookie string -- verify.
        """
        dir_ = modificate_file_name_for_wget(self.dir_)
        cwd = os.getcwd()
        if dir_ != cwd:
            if not os.path.exists(dir_):
                os.mkdir(dir_)
        ii = 1
        for i in songs:
            num = random.randint(0, 100) % 8
            col = s % (2, num + 90, i['file_name'])
            t = modificate_file_name_for_wget(i['file_name'])
            file_name = os.path.join(dir_, t)
            if os.path.exists(file_name):  ## if file exists, no get_durl
                if args.undownload:
                    self.modified_id3(file_name, i)
                    ii += 1
                    n += 1
                    continue
                else:
                    ii += 1
                    n += 1
                    continue
            if not args.undownload:
                if n == None:
                    print(u'\n  ++ download: #%s/%s# %s' \
                        % (ii, amount_songs, col))
                else:
                    print(u'\n  ++ download: #%s/%s# %s' \
                        % (n, amount_songs, col))
                    n += 1
                durl = self.get_durl(i['song_id'])
                if not durl:
                    print s % (2, 91, '  |-- Error: can\'t get durl')
                    continue
                mp3_quality = self.get_mp3_quality(durl)
                # NOTE(review): 'High' prints in red (91) here but green
                # (92) in display_infos
                if mp3_quality == 'h':
                    print '  |--', s % (1, 97, 'MP3-Quality:'), s % (1, 91, 'High')
                else:
                    print '  |--', s % (1, 97, 'MP3-Quality:'), s % (1, 91, 'Low')
                cookies = '; '.join(['%s=%s' % (k, v) for k, v in ss.cookies.items()])
                file_name_for_wget = file_name.replace('`', '\`')
                quiet = ' -q' if args.quiet else ' -nv'
                cmd = 'wget -c%s ' \
                    '-U "%s" ' \
                    '--header "Referer:http://img.xiami.com' \
                    '/static/swf/seiya/1.4/player.swf?v=%s" ' \
                    '--header "Cookie: member_auth=%s" ' \
                    '-O "%s.tmp" %s' \
                    % (quiet, headers['User-Agent'], int(time.time()*1000), cookies, file_name_for_wget, durl)
                cmd = cmd.encode('utf8')
                status = os.system(cmd)
                if status != 0:     # other http-errors, such as 302.
                    wget_exit_status_info = wget_es[status]
                    print('\n\n ----###   \x1b[1;91mERROR\x1b[0m ==> \x1b[1;91m%d ' \
                        '(%s)\x1b[0m   ###--- \n\n' % (status, wget_exit_status_info))
                    print s % (1, 91, '  ===> '), cmd
                    sys.exit(1)
                else:
                    os.rename('%s.tmp' % file_name, file_name)
                self.modified_id3(file_name, i)
                ii += 1
                time.sleep(5)
    def _save_do(self, id_, type, tags):
        """POST one add-to-library request; return 0 on success or the
        server's error status."""
        data = {
            "tags": tags,
            "type": type,
            "id": id_,
            "desc": "",
            "grade": "",
            "share": 0,
            "shareTo": "all",
            "_xiamitoken": ss.cookies['_xiamitoken'],
        }
        url = 'https://www.xiami.com/ajax/addtag'
        r = self._request(url, data=data, method='POST')
        j = r.json()
        if j['status'] == 'ok':
            return 0
        else:
            return j['status']
    def save(self, urls):
        """Save (favourite) each URL's entity into the logged-in user's
        library, tagged with --tags."""
        tags = args.tags
        for url in urls:
            if '/collect/' in url:
                collect_id = re.search(r'/collect/(\w+)', url).group(1)
                print s % (1, 97, u'\n  ++ save collect:'), \
                    'http://www.xiami.com/song/collect/' + collect_id
                result = self._save_do(collect_id, 4, tags)
            elif '/album/' in url:
                album_id = re.search(r'/album/(\w+)', url).group(1)
                # resolve a string id to its numeric form first
                album = self._api.album(album_id)
                album_id = album[0].album_id
                print s % (1, 97, u'\n  ++ save album:'), \
                    'http://www.xiami.com/album/' + str(album_id)
                result = self._save_do(album_id, 5, tags)
            elif '/artist/' in url:
                artist_id = re.search(r'/artist/(\w+)', url).group(1)
                print s % (1, 97, u'\n  ++ save artist:'), \
                    'http://www.xiami.com/artist/' + artist_id
                result = self._save_do(artist_id, 6, tags)
            elif '/song/' in url:
                song_id = re.search(r'/song/(\w+)', url).group(1)
                song = self._api.song(song_id)
                song_id = song.song_id
                print s % (1, 97, u'\n  ++ save song:'), \
                    'http://www.xiami.com/song/' + str(song_id)
                result = self._save_do(song_id, 3, tags)
            elif '/u/' in url:
                user_id = re.search(r'/u/(\d+)', url).group(1)
                print s % (1, 97, u'\n  ++ save user:'), \
                    'http://www.xiami.com/u/' + user_id
                result = self._save_do(user_id, 1, tags)
            else:
                result = -1
                print(s % (2, 91, u'   请正确输入虾米网址.'))
            if result == 0:
                print s % (1, 92, '  ++ success.\n')
            else:
                print s % (1, 91, '  !! Error at _save_do.'), result, '\n'
def main(argv):
    """CLI entry point: parse options, then dispatch on the sub-command
    (argv[1]): login/g, signout, d/download, p/play, s/save."""
    if len(argv) < 2:
        sys.exit()
    ######################################################
    # for argparse
    p = argparse.ArgumentParser(description='downloading any xiami.com')
    p.add_argument('xxx', type=str, nargs='*', \
        help='命令对象.')
    p.add_argument('-p', '--play', action='count', \
        help='play with mpv')
    p.add_argument('-l', '--low', action='store_true', \
        help='low mp3')
    p.add_argument('-q', '--quiet', action='store_true', \
        help='quiet for download')
    p.add_argument('-f', '--from_', action='store', \
        default=1, type=int, \
        help='从第几个开始下载,eg: -f 42')
    p.add_argument('-d', '--undescription', action='store_true', \
        help='no add disk\'s distribution')
    p.add_argument('-t', '--tags', action='store', \
        type=str, default='', help='tags. eg: piano,cello')
    p.add_argument('-n', '--undownload', action='store_true', \
        help='no download, using to renew id3 tags')
    # Parsed options are shared with the xiami class via this module global.
    global args
    args = p.parse_args(argv[2:])
    comd = argv[1]
    xxx = args.xxx
    if comd == 'login' or comd == 'g':
        # or comd == 'logintaobao' or comd == 'gt':
        # taobao has updated login algorithms which is hard to hack
        # so remove it.
        if len(xxx) < 1:
            email = raw_input(s % (1, 97, ' username: ') \
                if comd == 'logintaobao' or comd == 'gt' \
                else s % (1, 97, ' email: '))
            cookies = getpass(s % (1, 97, ' cookies: '))
        elif len(xxx) == 1:
            # for add_member_auth
            if '; ' in xxx[0]:
                email = None
                cookies = xxx[0]
            else:
                email = xxx[0]
                cookies = getpass(s % (1, 97, ' cookies: '))
        elif len(xxx) == 2:
            email = xxx[0]
            cookies = xxx[1]
        else:
            msg = ('login: \n'
                'login cookies')
            print s % (1, 91, msg)
            return
        # NOTE(review): `email` is collected but never used below —
        # presumably left over from the removed taobao login path.
        x = xiami()
        x.add_cookies(cookies)
        is_signin = x.check_login()
        if is_signin:
            print s % (1, 92, ' ++ login succeeds.')
        else:
            print s % (1, 91, ' login failes')
    elif comd == 'signout':
        # Signing out simply truncates the cookie file.
        g = open(cookie_file, 'w')
        g.close()
    elif comd == 'd' or comd == 'download':
        urls = xxx
        x = xiami()
        x.init()
        x.url_parser(urls)
    elif comd == 'p' or comd == 'play':
        # --play acts as a flag here; default it to 1 when absent.
        if not args.play: args.play = 1
        urls = xxx
        x = xiami()
        x.init()
        x.url_parser(urls)
    elif comd == 's' or comd == 'save':
        urls = xxx
        x = xiami()
        x.init()
        x.save(urls)
    else:
        print s % (2, 91, u' !! 命令错误\n')
if __name__ == '__main__':
    # argv[0] is ignored; argv[1] selects the sub-command (see main()).
    argv = sys.argv
    main(argv)
|
mit
| 8,859,938,585,418,477,000
| 35.344532
| 177
| 0.472513
| false
| 3.357534
| false
| false
| false
|
ctsit/redcap_deployment
|
utility_redcap.py
|
1
|
4631
|
from fabric.api import *
from tempfile import mkstemp
import os
import utility
try:
import configparser
except:
from six.moves import configparser
__all__ = []
def get_current_redcap_version():
    """Return the deployed REDCap version string, read from the
    redcap_config table on the remote host via the mysql CLI."""
    with settings(user=env.deploy_user), hide('output'):
        version = run('mysql -s -N -e "SELECT value from redcap_config WHERE field_name=\'redcap_version\'"')
    return version
def make_upload_target():
    """
    Make the directory from which new software will be deployed,
    e.g., /var/www.backup/redcap-20160117T1543/

    Side effect: records the path in env.upload_target_backup_dir for the
    later deployment steps.
    """
    env.upload_target_backup_dir = env.upload_project_full_path + '/' + env.remote_project_name
    with settings(user=env.deploy_user):
        run("mkdir -p " + env.upload_target_backup_dir)
def upload_package_and_extract(name, upgrade=False):
    """
    Upload the redcap package and extract it into the directory from which new
    software will be deployed, e.g., /var/www.backup/redcap-20160117T1543/

    name: local path of the redcap tarball to ship.
    upgrade: when True, site-specific files (database.php, hook_functions.php)
        are excluded from the rsync so an upgrade does not clobber them.
    """
    # NOTE: run as $ fab <env> package make_upload_target upe ...necessary env
    # variables are set by package and make_upload_target functions
    with settings(user=env.deploy_user):
        # Make a temp folder to upload the tar to
        temp1 = run('mktemp -d')
        put(name, temp1)
        # Test where temp/'receiving' is
        temp2 = run('mktemp -d')
        # Extract in temp ... -C specifies what directory to extract to
        # Extract to temp2 so the tar is not included in the contents
        run('tar -xzf %s/%s -C %s' % (temp1, name, temp2))
        # Transfer contents from temp2/redcap to ultimate destination
        with settings(warn_only=True):
            if run('test -d %s/webtools2/pdf/font/unifont' % env.upload_target_backup_dir).succeeded:
                # unifont files ship read-only; make them group-writable so
                # the rsync below can replace them.
                run('chmod ug+w %s/webtools2/pdf/font/unifont/*' % env.upload_target_backup_dir)
        # Write the new code on top of the existing code
        if upgrade == False:
            run('rsync -rc %s/redcap/* %s' % (temp2, env.upload_target_backup_dir))
        else:
            # exclude some files during upgrades
            exclusions = "--exclude=database.php --exclude=hook_functions.php"
            run('rsync -rc %s %s/redcap/* %s' % (exclusions, temp2, env.upload_target_backup_dir))
        # make sure the temp file directory in redcap web space will be writeable
        run('chmod -R g+w %s/temp' % env.upload_target_backup_dir)
        # Remove the temp directories
        run('rm -rf %s %s' % (temp1, temp2))
def move_software_to_live():
    """
    Replace the symbolic link to the old code with symbolic link to new code.

    If a live directory already exists its contents are first copied to a
    "<upload_target>-previous" backup, then destroyed, before the new
    symlink is created.
    """
    with settings(user=env.deploy_user):
        with settings(warn_only=True):
            if run("test -d %(live_project_full_path)s" % env).succeeded:
                # we need to back this directory up on the fly, destroy it and then symlink it back into existence
                with settings(warn_only=False):
                    new_backup_dir = env.upload_target_backup_dir + "-previous"
                    run("mkdir -p %s" % new_backup_dir)
                    # -P copies symlinks as symlinks instead of following them.
                    run("cp -rf -P %s/* %s" % (env.live_project_full_path, new_backup_dir))
                    run("rm -rf %s" % env.live_project_full_path)
        # now switch the new code to live
        run('ln -s %s %s' % (env.upload_target_backup_dir,env.live_project_full_path))
        # update directory permissions
        run('chmod 775 %s/modules' %env.upload_target_backup_dir)
def set_redcap_base_url():
    """
    Set the REDCap base url

    Thin wrapper: writes env.url_of_deployed_app into the redcap_base_url
    row of the redcap_config table via set_redcap_config.
    """
    set_redcap_config('redcap_base_url', env.url_of_deployed_app)
def set_redcap_config(field_name="", value=""):
    """
    Update a single values in the redcap config table

    NOTE(review): value and field_name are spliced directly into a shell/SQL
    string; values containing quotes will break the statement, and this is
    injectable if ever fed untrusted input — consider a parameterized client.
    """
    with settings(user=env.deploy_user):
        run('echo "update redcap_config set value=\'%s\' where field_name = \'%s\';" | mysql' % (value, field_name))
def test(warn_only=False):
    """
    Run all tests against a running REDCap instance.

    warn_only: when True, a failing suite only emits a warning and returns
        False; otherwise it aborts the fabric run.
    Returns True when the suite passes.

    Bug fixed: the suite was previously executed TWICE — an unguarded first
    local() call ran outside settings(warn_only=True), so any failure aborted
    immediately and the warn_only handling below never applied.
    """
    # Credentials are needed only while querying the deployed version.
    utility.write_remote_my_cnf()
    version = get_current_redcap_version()
    utility.delete_remote_my_cnf()
    with settings(warn_only=True):
        if local("python tests/test.py %s/ redcap_v%s/" % (env.url_of_deployed_app,version)).failed:
            if warn_only:
                warn("One or more tests failed.")
                return(False)
            else:
                abort("One or more tests failed.")
        else:
            return(True)
|
bsd-3-clause
| -1,521,481,576,971,571,200
| 37.915966
| 121
| 0.616929
| false
| 3.562308
| true
| false
| false
|
taxipp/ipp-macro-series-parser
|
ipp_macro_series_parser/denombrements_fiscaux/denombrements_parsers.py
|
1
|
29920
|
import logging
import numpy
import os
import pandas
import pkg_resources
import re
from ipp_macro_series_parser.config import Config
# Module-level configuration: directories for the raw Excel sources and the
# HDF5 cache come from the project config file.
config_parser = Config()
xls_directory = config_parser.get('data', 'denombrements_fiscaux_xls')
hdf_directory = config_parser.get('data', 'denombrements_fiscaux_hdf')
log = logging.getLogger(__name__)
def parse_ipp_denombrements():
    """Parse IPP's hand-built workbook of French tax-return aggregates.

    Each "bloc" below describes one rectangular range of the workbook
    (sheet, row offset, column span, slice, and the tax-form section prefix
    such as 'f1'). Every bloc is read, its columns are prefixed, and all
    blocs are melted into one long-format DataFrame with columns
    ``year``, ``code`` (e.g. 'f1aj') and ``value``.
    """
    file_path = os.path.join(xls_directory, u'Agrégats IPP - Données fiscales.xls')

    def parse_bloc(name = None, sheetname = '2042-montant', skiprows = 0, parse_cols = None, slice_start = None,
            slice_end = None, prefix = ''):
        # Read one rectangular bloc: first column is the year, the remaining
        # column names are lower-cased and get the section prefix prepended.
        assert name is not None
        df = pandas.read_excel(
            file_path,
            na_values = '-',
            sheetname = sheetname,
            skiprows = skiprows,
            parse_cols = parse_cols).iloc[slice_start:slice_end]
        df.columns = ['year'] + (prefix + df.columns[1:].str.lower()).tolist()
        try:
            # NOTE(review): convert_objects is deprecated in modern pandas —
            # this file presumably targets an old pinned version; confirm.
            df = df.convert_objects(convert_numeric=True)
            df = df.astype(float)
            df.year = df.year.astype(int)
        except Exception as e:
            # Best effort: on a coercion failure, report and return the
            # un-coerced frame instead of crashing the whole parse.
            print(e)
            return name, df
        return name, df

    # Main return (form 2042)
    # 1 - Wages, salaries, in-work tax credit (PPE), pensions and annuities
    traitements_salaires = dict(
        name = 'traitements_salaires',
        sheetname = '2042-montant',
        skiprows = 4,
        parse_cols = 'A:AB',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f1',
        )
    prime_emploi = dict(
        name = 'prime_emploi',
        sheetname = '2042-montant',
        skiprows = 25,
        parse_cols = 'A:K',
        slice_start = 1,
        slice_end = 17,
        prefix = 'f1',
        )
    pension_retraite = dict(
        name = 'pension_retraite',
        sheetname = '2042-montant',
        skiprows = 46,
        parse_cols = 'A:M',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f1',
        )
    rentes_viageres_titre_onereux = dict(
        name = 'rentes_viageres_titre_onereux',
        sheetname = '2042-montant',
        skiprows = 68,
        parse_cols = 'A:E',
        slice_start = 1,
        slice_end = 17,
        prefix = 'f1',
        )
    # 2 - Income from securities and movable capital
    prelevement_forfaitaire_liberatoire = dict(
        name = 'prelevement_forfaitaire_liberatoire',
        sheetname = '2042-montant',
        skiprows = 89,
        parse_cols = 'A:D',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f2',
        )
    revenus_avec_abattement = dict(
        name = 'revenus_avec_abattement',
        sheetname = '2042-montant',
        skiprows = 111,
        parse_cols = 'A:E',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f2',
        )
    revenus_sans_abattement = dict(
        name = 'revenus_sans_abattement',
        sheetname = '2042-montant',
        skiprows = 133,
        parse_cols = 'A:D',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f2',
        )
    autres_revenus_financiers = dict(
        name = 'autres_revenus_financiers',
        sheetname = '2042-montant',
        skiprows = 154,
        parse_cols = 'A:I',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f2',
        )
    # 3 - Capital gains taxable at 16% (18% from 2008 on)
    plus_values = dict(
        name = 'plus_values',
        sheetname = '2042-montant',
        skiprows = 199,
        parse_cols = 'A:C',
        slice_start = 1,
        slice_end = 19,
        prefix = 'f3',
        )
    # 4 - Property (rental) income
    # TODO: copy/paste of a note
    # For the 1996-2001 tallies the detail of the individual deficits is lost;
    # only the aggregated total (the 'total def' cell) is known.
    # Since each deficit's share of the total is nearly constant over time, the
    # split of the total across the deficits is assumed constant over 1996-2001
    # and equal to its 2003 level.
    # TODO: drop 2012 to 2014?
    revenus_fonciers = dict(
        name = 'revenus_foncier',
        sheetname = '2042-montant',
        skiprows = 222,
        parse_cols = 'A:H',
        slice_start = 1,
        slice_end = 20,
        prefix = 'f3',
        )
    contribution_revenus_locatifs = dict(
        name = 'contribution_revenus_locatifs',
        sheetname = '2042-montant',
        skiprows = 246,
        parse_cols = 'A:C',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f4',
        )
    # 5 - Exceptional or deferred income
    revenus_exceptionnels = dict(
        name = 'revenus_exceptionnels',
        sheetname = '2042-montant',
        skiprows = 268,
        parse_cols = 'A:B',
        slice_start = 1,
        slice_end = 19,
        prefix = 'f5',
        )
    # 6 - Deductible charges and miscellaneous imputations
    charges_deductibles = dict(
        name = 'charges_deductibles',
        sheetname = '2042-montant',
        skiprows = 316,
        parse_cols = 'A:I',
        slice_start = 1,
        slice_end = 19,
        prefix = 'f6',
        )
    epargne_retraite = dict(
        name = 'epargne_retraite',
        sheetname = '2042-montant',
        skiprows = 338,
        parse_cols = 'A:O',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f6',
        )
    # 7 - Expenses giving entitlement to a tax reduction or tax credit
    reductions_credits_impot = dict(
        name = 'reductions_credits_impot',
        sheetname = '2042-montant',
        skiprows = 360,
        parse_cols = 'A:BH',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f7',
        )
    # 8 - Other imputations, clawbacks of tax reductions, international
    # conventions, miscellaneous
    autres_imputations = dict(
        name = 'autres_imputations',
        sheetname = '2042-montant',
        skiprows = 383,
        parse_cols = 'A:L',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f7',
        )
    # Supplementary return (form 2042C)
    # 1 - Stock-option exercise gains
    options = dict(
        name = 'options',
        sheetname = '2042C - montant',
        skiprows = 5,
        parse_cols = 'A:I',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f1',
        )
    # NOTE(review): leftover smoke-check of the options bloc; the three
    # statements below have no effect on the returned result.
    name, df = parse_bloc(**options)
    df.dtypes
    df.year
    # exempt salaries
    salaires_exoneres = dict(
        name = 'salaires_exoneres',
        sheetname = '2042C - montant',
        skiprows = 26,
        parse_cols = 'A:I',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f1',
        )
    # mobility tax credit
    # TODO; nothing in the IPP aggregates
    # 3 - Capital gains and miscellaneous gains
    plus_values_complementaire = dict(
        name = 'plus_values_complementaire',
        sheetname = '2042C - montant',
        skiprows = 67,
        parse_cols = 'A:T',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f3',
        )
    # 4 - Property income
    revenus_fonciers_complementaire = dict(
        name = 'revenus_fonciers_complementaire',
        sheetname = '2042C - montant',
        skiprows = 88,
        parse_cols = 'A:B',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f4',
        )
    # 5 - Income and capital gains of the self-employed
    prime_emploi_complementaire = dict(
        name = 'prime_emploi_complementaire',
        sheetname = '2042C - montant',
        skiprows = 111,
        parse_cols = 'A:G',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f5',
        )
    revenus_agricoles_forfait = dict(
        name = 'revenus_agricoles_forfait',
        sheetname = '2042C - montant',
        skiprows = 167,
        parse_cols = 'A:Q',
        slice_start = 0,
        slice_end = 18,
        prefix = 'f5',
        )
    revenus_agricoles_reel = dict(
        name = 'revenus_agricoles_reel',
        sheetname = '2042C - montant',
        skiprows = 190,
        parse_cols = 'A:Y',
        slice_start = 0,
        slice_end = 18,
        prefix = 'f5',
        )
    revenus_agricoles_deficits = dict(
        name = 'revenus_agricoles_deficits',
        sheetname = '2042C - montant',
        skiprows = 212,
        parse_cols = 'A:M',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f5',
        )
    # TODO: *before 2007 the HE, IE, JE cells were each split in two (HE and
    # HK, ..., JE and JK) according to CGA membership
    # Professional industrial and commercial income (BIC)
    bic_pro_micro_entreprise = dict(
        name = 'bic_pro_micro_entreprise',
        sheetname = '2042C - montant',
        skiprows = 237,
        parse_cols = 'A:U',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f5',
        )
    bic_pro_reel = dict(
        name = 'bic_pro_reel',
        sheetname = '2042C - montant',
        skiprows = 282,
        parse_cols = 'A:AE',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f5',
        )
    # TODO
    # For 1997 income there is no distinction between professional and
    # non-professional BIC. The "exempt BIC" amounts are put in this cell
    # (and nothing in the NB cell for non-professional exempt BIC).
    bic_pro_cga = dict(
        name = 'bic_pro_cga',
        sheetname = '2042C - montant',
        skiprows = 304,
        parse_cols = 'A:G',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f5',
        )
    bic_non_pro_micro_entreprise = dict(
        name = 'bic_non_pro_micro_entreprise',
        sheetname = '2042C - montant',
        skiprows = 328,
        parse_cols = 'A:T',
        slice_start = 0,
        slice_end = 18,
        prefix = 'f5',
        )
    bic_non_pro_reel = dict(
        name = 'bic_non_pro_reel',
        sheetname = '2042C - montant',
        skiprows = 351,
        parse_cols = 'A:AH',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f5',
        )
    # For 1997 only an aggregate of non-professional BIC and non-professional
    # BNC is available, with no breakdown by tax regime (simplified, real).
    # For that year the aggregate is put in cell NC for income and in cell NF
    # for deficits — the cells for non-professional BIC taxed under the real
    # regime.
    bic_non_pro_deficit_anterieur = dict(
        name = 'bic_non_pro_deficit_anterieur',
        sheetname = '2042C - montant',
        skiprows = 373,
        parse_cols = 'A:G',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f5',
        )
    # Professional non-commercial income (BNC)
    bnc_pro_micro_vous = dict(
        name = 'bnc_pro_micro_vous',
        sheetname = '2042C - montant',
        skiprows = 396,
        parse_cols = 'A:P',
        slice_start = 0,
        slice_end = 18,
        prefix = 'f5',
        )
    # *before 2007 the QD cell was split in two (QD and QJ) according to AA
    # membership
    bnc_pro_micro_conj = dict(
        name = 'bnc_pro_micro_conj',
        sheetname = '2042C - montant',
        skiprows = 417,
        parse_cols = 'A:O',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f5',
        )
    # *before 2007 the RD cell was split in two (RD and RJ) according to AA
    # membership
    bnc_pro_micro_pac = dict(
        name = 'bnc_pro_micro_pac',
        sheetname = '2042C - montant',
        skiprows = 437,
        parse_cols = 'A:N',
        slice_start = 0,
        slice_end = 17,
        prefix = 'f5',
        )
    # *before 2007 the SD cell was split in two (SD and SJ) according to AA
    # membership
    # Non-professional non-commercial income
    bnc_non_pro_vous = dict(
        name = 'bnc_non_pro_vous',
        sheetname = '2042C - montant',
        skiprows = 482,
        parse_cols = 'A:T',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f5',
        )
    # *before 2006 the JG-MT variables cover the whole household rather than
    # just the filer; cells JK-SW and LK-SX are therefore dropped.
    bnc_non_pro_conj = dict(
        name = 'bnc_non_pro_conj',
        sheetname = '2042C - montant',
        skiprows = 502,
        parse_cols = 'A:M',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f5',
        )
    bnc_non_pro_pac = dict(
        name = 'bnc_non_pro_pac',
        sheetname = '2042C - montant',
        skiprows = 521,
        parse_cols = 'A:M',
        slice_start = 1,
        slice_end = 18,
        prefix = 'f5',
        )
    # Ancillary income
    # TODO
    # Income subject to social levies
    revenus_prelevements_sociaux = dict(
        name = 'revenus_prelevements_sociaux',
        sheetname = '2042C - montant',
        skiprows = 567,
        parse_cols = 'A:I',
        slice_start = 0,
        prefix = 'f5',
        slice_end = 17,
        )
    # 6 - Charges and miscellaneous imputations (charges to deduct from income)
    charges_imputations_diverses = dict(
        name = 'charges_imputations_diverses',
        sheetname = '2042C - montant',
        skiprows = 587,
        parse_cols = 'A:R',
        slice_start = 2,
        prefix = 'f5',
        slice_end = 19,
        )
    # 3: this EH cell (investment in a co-development savings account) is
    # unrelated to the EH cell in column O (DOM-TOM investment)
    # 4: this cell was on the 2042 return before 2007 (amount to add to
    # taxable income)
    # 7 - Expenses giving entitlement to a tax reduction or tax credit
    reductions_credits_impot_complementaire = dict(
        name = 'reductions_credits_impot_complementaire',
        sheetname = '2042C - montant',
        skiprows = 613,
        parse_cols = 'A:BA',
        slice_start = 2,
        prefix = 'f5',
        slice_end = 20,
        )
    # 3: the raw data are aberrant for 2007 (e.g. 113 863 3 is missing two
    # trailing zeros); three zeros were appended to the raw UA and UJ figures
    # to restore the right order of magnitude.
    # *UI = total overseas ("Outre-mer") tax reduction. Before 2008 the return
    # details the overseas investment components by sector of activity.
    # 8 - Other imputations, international conventions, business tax credits
    autres_imputations_complementaire = dict(
        name = 'autres_imputations_complementaire',
        sheetname = '2042C - montant',
        skiprows = 639,
        parse_cols = 'A:Z',
        slice_start = 1,
        prefix = 'f5',
        slice_end = 20,
        )
    # name, df = parse_bloc(**autres_imputations_complementaire)
    # print df.dtypes
    # df.year
    # 8 - Other imputations, international conventions, business tax credits
    blocs = [
        traitements_salaires,
        prime_emploi,
        pension_retraite,
        rentes_viageres_titre_onereux,
        prelevement_forfaitaire_liberatoire,
        revenus_avec_abattement,
        revenus_sans_abattement,
        autres_revenus_financiers,
        plus_values,
        revenus_fonciers,
        contribution_revenus_locatifs,
        revenus_exceptionnels,
        charges_deductibles,
        epargne_retraite,
        reductions_credits_impot,
        autres_imputations,
        options,
        salaires_exoneres,
        plus_values_complementaire,
        revenus_fonciers_complementaire,
        prime_emploi_complementaire,
        revenus_agricoles_forfait,
        revenus_agricoles_reel,
        revenus_agricoles_deficits,
        bic_pro_micro_entreprise,
        bic_pro_reel,
        bic_pro_cga,
        bic_non_pro_micro_entreprise,
        bic_non_pro_reel,
        bic_non_pro_deficit_anterieur,
        bnc_pro_micro_vous,
        bnc_pro_micro_conj,
        bnc_pro_micro_pac,
        bnc_non_pro_vous,
        bnc_non_pro_conj,
        bnc_non_pro_pac,
        revenus_prelevements_sociaux,
        charges_imputations_diverses,
        reductions_credits_impot_complementaire,
        autres_imputations_complementaire
        ]
    # Parse every bloc, normalize the column names, then melt everything into
    # one long (year, code, value) frame.
    data_frame_by_bloc_name = dict(parse_bloc(**bloc) for bloc in blocs)
    correct_errors(data_frame_by_bloc_name, show_only = False)
    ipp_denombrements = pandas.DataFrame()
    for data_frame in data_frame_by_bloc_name.values():
        ipp_denombrements = pandas.concat((
            ipp_denombrements,
            pandas.melt(data_frame, id_vars=['year'], var_name = 'code')
            ))
    ipp_denombrements.dropna(inplace = True)
    return ipp_denombrements
def correct_errors(data_frame_by_bloc_name, show_only = False):
    """Normalize column names of every parsed bloc, in place.

    Valid codes match f[1-8][a-z][a-z]. Columns with trailing whitespace,
    a trailing '*', or a trailing footnote digit are renamed; 'unnamed',
    '-' and 'total' columns are dropped. Remaining unfixable names are
    printed. With show_only=True nothing is modified, only reported.
    """
    import re
    pattern = re.compile("^f[1-8][a-z][a-z]$")
    note_pattern = re.compile("^f[1-8][a-z][a-z][1-4]$")
    corrected_columns = set()
    problematic_columns = set()
    for bloc_name, data_frame in data_frame_by_bloc_name.items():
        correct_name_by_wrong_name = dict()
        drop_columns = list()
        for column in data_frame.columns:
            if column == 'year':
                # Sanity check: the year column must be integral and in range.
                assert numpy.issubdtype(data_frame[column].dtype, numpy.integer)
                assert data_frame[column].isin(range(1990, 2015)).all()
                continue
            if not pattern.match(column):
                # print '- ' + str(column)
                # remove trailing spaces
                problematic_columns.add(column)
                if column != column.strip():
                    correct_name_by_wrong_name[column] = column.strip()
                # remove *
                if column.endswith('*') and pattern.match(column[:-1]):
                    correct_name_by_wrong_name[column] = column[:-1]
                # remove unnamed
                if "unnamed" in column or "-" in column or 'total' in column:
                    drop_columns.append(column)
                # remove trailing 1, 2, 3, 4 (notes in excel file)
                if note_pattern.match(column):
                    correct_name_by_wrong_name[column] = column[:-1]
        corrected_columns = corrected_columns.union(set(correct_name_by_wrong_name.keys()))
        corrected_columns = corrected_columns.union(set(drop_columns))
        if not show_only:
            # Apply the collected drops/renames directly on the caller's frame.
            data_frame.drop(labels = drop_columns, axis = 1, inplace = True)
            data_frame.rename(columns = correct_name_by_wrong_name, inplace = True)
    print('Remaining problematic columns')
    print(problematic_columns.difference(corrected_columns))
def parse_openfisca_denombrements():
    """Load OpenFisca's 2042 workbook and return a long-format frame with
    columns code / year / value (year cast to int)."""
    wide = pandas.read_excel(os.path.join(xls_directory, '2042_national.xls'), sheetname = 'montant')
    # Every column must already be numeric; fail loudly otherwise.
    assert wide.dtypes.apply(lambda dtype: numpy.issubdtype(dtype, numpy.float)).all(), \
        wide.dtypes
    long_format = wide.stack().reset_index()
    long_format.rename(columns = {'level_0': 'code', 'level_1': 'year', 0: 'value'}, inplace = True)
    long_format[['year']] = long_format[['year']].astype(int)
    return long_format
def parse_dgfip_denombrements(years = None):
    """Parse the raw DGFiP 'dénombrements' workbooks (one file per year).

    Parameters
    ----------
    years : list of int (required); every year must lie in [2001, 2013].

    Returns a long-format DataFrame with columns code / value / year;
    missing and zero-valued amounts are excluded.

    Raises NotImplementedError for 2005-2008: those sheets are not handled
    yet (the original code crashed there on an undefined name, `boum`).
    Stray debug prints were also removed.
    """
    assert years is not None
    assert min(years) >= 2001
    assert max(years) <= 2013
    dgfip_directory = os.path.join(xls_directory, 'D2042Nat')
    files = os.listdir(dgfip_directory)
    result = pandas.DataFrame()
    for year in years:
        # Workbook names start with 'R' followed by the 4-digit year.
        file_regex = re.compile("^R20{}".format(str(year)[2:4]))
        for filename in files:
            if file_regex.match(filename):
                log.info("Using file {} for year {}".format(filename, year))
                break
        # NOTE(review): if no file matches, `filename` silently keeps the last
        # directory entry, and 2002 reaches no processing branch at all —
        # confirm those cases never occur in practice.
        if year in [2001, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013]:
            dgfip_denombrements = pandas.read_excel(os.path.join(dgfip_directory, filename))
        if year == 2003:
            dgfip_denombrements = pandas.read_excel(os.path.join(dgfip_directory, filename), skiprows = 4)
        if year in [2001, 2003]:
            # Codes look like '1AJ'; keep only such rows and map to 'f1aj'.
            regex = re.compile("^[0-9][A-Z]{2}")
            dgfip_denombrements.code.fillna("", inplace = True)
            dgfip_denombrements = dgfip_denombrements.set_index('code').filter(regex = regex, axis = 0)
            new_variable_name_by_old = dict(
                (x, "f{}".format(x.lower())) for x in dgfip_denombrements.index)
            dgfip_denombrements = dgfip_denombrements.rename(index = new_variable_name_by_old)
            dgfip_denombrements['year'] = year
            dgfip_denombrements.rename(columns = {'montant': 'value', 'Nombre': 'nombre'}, inplace = True)
            del dgfip_denombrements['nombre']
            # NOTE(review): unlike the other branches, the index is not reset
            # here, so 'code' stays in the index — kept as in the original.
        if year in [2005, 2006, 2007, 2008]:
            # These sheets index rows by bare two-letter names ('AJ') with no
            # section digit, so they cannot be mapped to 'f1aj'-style codes
            # without extra information. The original fell through to an
            # undefined name and died with a NameError; fail explicitly.
            raise NotImplementedError(
                "parse_dgfip_denombrements: years 2005-2008 are not supported yet"
                )
        if year == 2004:
            regex = re.compile("^Z[0-9][A-Z]{2}")
            dgfip_denombrements.case.fillna("", inplace = True)
            dgfip_denombrements.drop_duplicates(['case'], inplace = True)
            dgfip_denombrements = dgfip_denombrements.set_index('case').filter(regex = regex, axis = 0)
            dgfip_denombrements.index.name = 'code'
            # 'Z1AJ' -> 'f1aj': drop the leading Z, lower-case, prefix with f.
            new_variable_name_by_old = dict(
                (x, "f{}".format(x[1:].lower())) for x in dgfip_denombrements.index)
            dgfip_denombrements = dgfip_denombrements.rename(index = new_variable_name_by_old)
            dgfip_denombrements.reset_index(inplace = True)
            dgfip_denombrements['year'] = year
            dgfip_denombrements.rename(columns = {'Montant': 'value'}, inplace = True)
            del dgfip_denombrements['Nombre'], dgfip_denombrements[u'libellé'], dgfip_denombrements['nom']
        if year in [2009, 2010, 2011, 2012]:
            regex = re.compile("^Z[0-9][A-Z]{2}")
            dgfip_denombrements = dgfip_denombrements.set_index('nom').filter(regex = regex, axis = 0)
            dgfip_denombrements.index.name = 'code'
            new_variable_name_by_old = dict(
                (x, "f{}".format(x[1:].lower())) for x in dgfip_denombrements.index)
            dgfip_denombrements = dgfip_denombrements.rename(index = new_variable_name_by_old)
            dgfip_denombrements.reset_index(inplace = True)
            dgfip_denombrements['year'] = year
            # Column headers differ between the 2009 and 2010-2012 workbooks.
            if year == 2009:
                dgfip_denombrements.rename(columns = {'Montants': 'value', 'Nombre': 'nombre'}, inplace = True)
            else:
                dgfip_denombrements.rename(columns = {'montants': 'value'}, inplace = True)
            del dgfip_denombrements['maximal'], dgfip_denombrements['nombre']
        if year == 2013:
            regex = re.compile("^Z[0-9][A-Z]{2}")
            dgfip_denombrements = dgfip_denombrements.set_index('nom').filter(regex = regex, axis = 0)
            dgfip_denombrements.index.name = 'code'
            new_variable_name_by_old = dict(
                (x, "f{}".format(x[1:].lower())) for x in dgfip_denombrements.index)
            dgfip_denombrements = dgfip_denombrements.rename(index = new_variable_name_by_old)
            dgfip_denombrements.reset_index(inplace = True)
            dgfip_denombrements['year'] = year
            dgfip_denombrements.rename(columns = {'ano': 'value'}, inplace = True)
            del dgfip_denombrements['pas_ano']
        result = pandas.concat((result, dgfip_denombrements))
    result.dropna(subset = ['value'], inplace = True)  # dropping NA's
    return result.loc[result.value != 0].copy()  # excluding 0 values
def create_denombrements_fiscaux_data_frame(year = None, years = None, overwrite = False):
    """
    Generates the table with all the data from Dénombrements Fiscaux .

    Parameters
    ----------
    year : int
        year of DGFIP data (coincides with year of declaration)
    years : list of integers
        list of years of interest. Optional.
    overwrite : bool
        when True, persist the resulting table to the HDF cache.

    Example
    --------
    >>> table_2013 = denombrements_fiscaux_df_generator(year = 2013)

    Returns the main table of dénombrements fiscaux for the year 2013,
    together with the DataFrame of unresolved duplicate (year, code) rows.

    Fix: the log message below said 'Remaining roblematic duplicates'
    (typo) — corrected to 'problematic'.
    """
    if year is not None and years is None:
        years = [year]
    log.info('Parsing dénombrements fiscaux raw data for the following years: {}'.format(years))
    # Data coming from the openfisca xls file
    openfisca_denombrements = parse_openfisca_denombrements()
    openfisca_denombrements['origin'] = 'OF'
    # Data coming from IPP
    ipp_denombrements = parse_ipp_denombrements()
    ipp_denombrements['origin'] = 'IPP'
    df = pandas.concat([ipp_denombrements, openfisca_denombrements])
    # Drop real duplicates (same year, code AND value).
    df = df.drop_duplicates(subset = ['year', 'code', 'value'])
    df = df.reset_index(drop=True)
    # Problematic duplicates: same (year, code) but conflicting values.
    dups = df.duplicated(['year', 'code']) | df.duplicated(['year', 'code'], keep = 'last')
    z = df.loc[dups].copy()
    # Known bad IPP cells before 2007 (sum of two columns); drop them.
    wrong_before_2007 = ['f5ne', 'f5oe', 'f5rd', 'f5ke', 'f5le', 'f5he', 'f5ie', 'f5qd']
    df = df.loc[~(df.code.isin(wrong_before_2007) & (df.year < 2007))]
    log.info('Remaining problematic duplicates when merging IPP and OF \n {}'.format(
        z.loc[~(z.code.isin(wrong_before_2007) & (z.year < 2007))]
        ))
    df = df.loc[df.year.isin(years)].copy() if years is not None else df.copy()
    # Data coming from DGFiP
    dgfip_denombrements = parse_dgfip_denombrements(years)
    dgfip_denombrements['origin'] = 'DGFiP'
    df2 = pandas.concat([dgfip_denombrements, df])
    # Drop real duplicates again after adding the DGFiP data.
    df2 = df2.drop_duplicates(subset = ['year', 'code', 'value'])
    df2 = df2.reset_index(drop=True)
    dups2 = df2.duplicated(['year', 'code']) | df2.duplicated(['year', 'code'], keep = 'last')
    errors = df2.loc[dups2].copy()
    # Known conflicting codes/years between the three sources; excluded below.
    wrong_codes = ['f5ne', 'f5oe', 'f5rd', 'f5ke', 'f5le', 'f4tq', 'f5hd',
    'f5id', 'f5he', 'f5ie', 'f5qd', 'f3ve', 'f3vf', 'f3ve', 'f3vf', 'f7tf', 'f7tf', 'f2gr', 'f2ch', 'f2bg', 'f6el',
    'f6st', 'f2bg', 'f7cd', 'f2gr', 'f2ch', 'f7cd', 'f6st', 'f6el']
    wrong_years = [2006, 2005, 2004, 2003]
    log.info('Remaining problematic duplicates when merging with DGFiP data \n {}'.format(
        errors.loc[~(errors.code.isin(wrong_codes) | errors.year.isin(wrong_years))]
        ))
    df2 = df2.loc[~(df2.code.isin(wrong_codes) | (df2.year.isin(wrong_years)))]
    result = df2.loc[df2.year.isin(years)].copy() if years is not None else df2.copy()
    log.info('For now, we keep only DGFiP data')
    result = dgfip_denombrements.copy()  # TODO: cross-check against OpenFisca & IPP data
    if overwrite:
        save_df_to_hdf(result, 'denombrements_fiscaux.h5', 'montants')
    return result, errors
def build_section_code():
    """Return the unique tax-cell codes found across both source workbooks.

    Bug fixed: the original concatenated the OpenFisca code series with
    ITSELF, so the IPP codes (parsed but then unused) never made it into
    the result.
    """
    openfisca_denombrements = parse_openfisca_denombrements()
    ipp_denombrements = parse_ipp_denombrements()
    codes = pandas.concat([openfisca_denombrements.code, ipp_denombrements.code])
    return codes.unique()
def get_denombrements_fiscaux_data_frame(year = None, years = None, rebuild = False, overwrite = False,
        fill_value = numpy.nan):
    # Facade: either rebuild the table from the raw sources (rebuild=True,
    # optionally persisting it when overwrite=True) or read it back from the
    # HDF cache and restrict it to the requested years.
    # NOTE(review): fill_value is currently unused — confirm intent before
    # removing it from the signature.
    if year is not None and years is None:
        years = [year]
    if rebuild:
        return create_denombrements_fiscaux_data_frame(years = years, overwrite = overwrite)
    else:
        data_frame = import_from_hdf('denombrements_fiscaux.h5', 'montants')
        return data_frame.loc[data_frame.year.isin(years)].copy()
def save_df_to_hdf(df, hdf_filename, key):
    """Persist *df* under *key* in an HDF5 file located in hdf_directory."""
    df.to_hdf(os.path.join(hdf_directory, hdf_filename), key)
def import_from_hdf(hdf_filename, key):
    """Load the DataFrame stored under *key* in an HDF5 file in hdf_directory.

    Fix: the original opened a pandas.HDFStore and never closed it, leaking
    the file handle; the store is now managed by a context manager.
    """
    file_path = os.path.join(hdf_directory, hdf_filename)
    with pandas.HDFStore(file_path) as store:
        return store[key]
if __name__ == '__main__':
    # Ad-hoc manual smoke run: build the code list, then parse one DGFiP year.
    build_section_code()
    dgfip = parse_dgfip_denombrements(years = range(2008, 2009))
    print(dgfip)
    # denomb_fisc_all, errors = create_denombrements_fiscaux_data_frame(
    #     years = range(2009, 2014),
    #     overwrite = True,
    #     )
|
gpl-3.0
| -7,274,436,793,316,057,000
| 33.142039
| 132
| 0.580588
| false
| 3.091909
| false
| false
| false
|
johnnoone/zbx
|
zbx/io/defaults.py
|
1
|
3539
|
"""
zbx.io.defaults
~~~~~~~~~~~~~~~
Defines all zabbix defaults
"""
__all__ = ['rules', 'RuleSet']
from abc import ABCMeta
from itertools import chain
from six import add_metaclass
@add_metaclass(ABCMeta)
class RuleSet(object):
    """A named scope of (path, value) default rules.

    path: scope prefix, or None for an anonymous (merged) set.
    rules: a mapping, or an iterable whose items are either (path, value)
        pairs or nested RuleSet instances.
    Iterating yields flattened ('scope/sub/field', value) pairs.

    Fix: removed a no-op ``try/except Exception: raise`` wrapper that
    surrounded the loop body in ``__iter__``.
    """

    def __init__(self, path, rules):
        self.path = path
        self.rules = rules

    def __iter__(self):
        # Accept both mappings and plain iterables of rules (EAFP).
        try:
            rules = self.rules.items()
        except AttributeError:
            rules = self.rules
        for rule in rules:
            if isinstance(rule, RuleSet):
                # Nested scope: re-prefix each of its flattened paths.
                for endpath, value in rule:
                    yield self.format_path(endpath), value
            else:
                endpath, value = rule
                yield self.format_path(endpath), value

    def format_path(self, path):
        """Prefix *path* with this scope's path, if any."""
        if self.path:
            return '{}/{}'.format(self.path, path)
        return path

    def __add__(self, other):
        """Concatenate two rule sets into an anonymous merged set."""
        return RuleSet(None, chain(self, other))
# Plain lists count as (anonymous) rule sets for isinstance checks.
RuleSet.register(list)
def scope(path, rules):
    # Convenience constructor: a named RuleSet rooted at *path*.
    return RuleSet(path, rules)
# Zabbix default values, grouped per entity type. Each scope(...) call yields
# (path, default) pairs such as 'host/ipmi_authtype' -> -1 when iterated.
rules = scope('host', [
    ('ipmi_authtype', -1),
    ('ipmi_available', 0),
    ('ipmi_privilege', 2),
    ('ipmi_username', ''),
    ('ipmi_password', ''),
    ('maintenance_status', 0),
    ('snmp_available', 0),
    ('status', 0),
    scope('inventory', [
        ('inventory_mode', 0)
    ]),
])
# Host prototypes share most host defaults (minus the IPMI credentials).
rules += scope('host_prototype', [
    ('ipmi_authtype', -1),
    ('ipmi_available', 0),
    ('ipmi_privilege', 2),
    ('maintenance_status', 0),
    ('snmp_available', 0),
    ('status', 0),
    scope('inventory', [
        ('inventory_mode', 0)
    ]),
])
# Item defaults (history/trend retention in days).
rules += scope('item', [
    ('authtype', 0),
    ('data_type', 0),
    ('delta', 0),
    ('formula', 1),
    ('history', 90),
    ('inventory_link', 0),
    ('state', 0),
    ('status', 0),
    ('trends', 365),
    ('units', ''),
    ('snmpv3_authprotocol', 0),
    ('snmpv3_privprotocol', 0),
    ('multiplier', 0),
])
rules += scope('screen', [
    # ('hsize', 1),
    ('vsize', 1)
])
# Screen item defaults (geometry in pixels, grid span in cells).
rules += scope('screen_item', [
    ('dynamic', 0),
    ('elements', 25),
    ('halign', 0),
    ('height', 200),
    ('sort_triggers', 0),
    ('style', 0),
    ('valign', 0),
    ('width', 320),
    # ('x', 0),
    # ('y', 0),
    ('colspan', 1),
    ('rowspan', 1),
])
# Action defaults, including nested condition/operation/message sub-scopes.
rules += scope('action', [
    ('recovery_msg', 0),
    ('status', 0),
    scope('condition', [
        ('operator', 0)
    ]),
    scope('operation', [
        ('esc_period', 0),
        ('esc_step_from', 1),
        ('esc_step_to', 1),
        ('evaltype', 0),
    ]),
    scope('message', [
        ('default_msg', 0)
    ]),
    scope('operation', [
        ('operator', 0)
    ])
])
# Graph defaults (axes as percentages / absolute bounds).
rules += scope('graph', [
    ('type', 0),
    ('percent_left', 0.),
    ('percent_right', 0.),
    ('show_3d', 0),
    ('show_legend', 1),
    ('show_work_period', 1),
    ('show_triggers', 1),
    ('yaxismax', 100.0),
    ('yaxismin', 0.0),
    ('ymax_type', 0),
    ('ymin_type', 0),
    ('ymin_item_1', 0),
    ('ymax_item_1', 0)
])
rules += scope('graph_item', [
    ('calc_fnc', 2),
    ('drawtype', 0),
    ('sortorder', 0),
    ('type', 0),
    ('yaxisside', 0)
])
rules += scope('trigger', [
    ('priority', 0),
    ('state', 0),
    ('status', 0),
    ('type', 0),
    ('value', 0)
])
# Low-level discovery rule defaults (lifetime in days).
rules += scope('discovery_rule', [
    ('authtype', 0),
    ('lifetime', 30),
    ('snmpv3_authprotocol', 0),
    ('snmpv3_privprotocol', 0),
    ('state', 0),
    ('status', 0),
])
|
bsd-3-clause
| 6,384,678,967,305,997,000
| 19.107955
| 62
| 0.473298
| false
| 3.367269
| false
| false
| false
|
th3sys/capsule
|
push_items.py
|
1
|
1667
|
from __future__ import print_function # Python 2/3 compatibility
import boto3
import json
import uuid
import time
import decimal
# One-shot loader: reads quotes.json and securities.json from the working
# directory and pushes every record into DynamoDB (region hard-coded to
# us-east-1). parse_float=decimal.Decimal is required because DynamoDB
# rejects binary floats.
dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
sec_table = dynamodb.Table('Securities')
quotes_table = dynamodb.Table('Quotes.EOD')
with open("quotes.json") as json_file:
    quotes = json.load(json_file, parse_float = decimal.Decimal)
    for quote in quotes:
        Symbol = quote['Symbol']
        # The quote timestamp is the load time, not a market date —
        # each run stamps rows with the current epoch time as a string.
        Date = str(time.time())
        Source = quote['Source']
        Details = quote['Details']
        print("Adding quote:", Symbol, Date)
        quotes_table.put_item(
            Item={
                'Symbol': Symbol,
                'Date': Date,
                'Source': Source,
                'Details': Details
            }
        )
with open("securities.json") as json_file:
    securities = json.load(json_file, parse_float = decimal.Decimal)
    for security in securities:
        Symbol = security['Symbol']
        Broker = security['Broker']
        ProductType = security['ProductType']
        # Coerce JSON truthiness explicitly into DynamoDB booleans.
        SubscriptionEnabled = bool(security['SubscriptionEnabled'])
        TradingEnabled = bool(security['TradingEnabled'])
        Description = security['Description']
        Risk = security['Risk']
        print("Adding security:", Symbol)
        sec_table.put_item(
            Item={
                'Symbol': Symbol,
                'Broker' : Broker,
                'ProductType': ProductType,
                'SubscriptionEnabled': SubscriptionEnabled,
                'TradingEnabled':TradingEnabled,
                'Description':Description,
                'Risk':Risk
            }
        )
|
mit
| 7,072,675,700,951,598,000
| 28.767857
| 68
| 0.577684
| false
| 4.363874
| false
| false
| false
|
skosukhin/spack
|
var/spack/repos/builtin/packages/clamr/package.py
|
1
|
3013
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Clamr(CMakePackage):
    """The CLAMR code is a cell-based adaptive mesh refinement (AMR)
    mini-app developed as a testbed for hybrid algorithm development
    using MPI and OpenCL GPU code.
    """
    homepage = "https://github.com/lanl/CLAMR"
    url = "https://github.com/lanl/CLAMR.git"
    tags = ['proxy-app']
    version('master', git='https://github.com/lanl/CLAMR.git')
    variant(
        'graphics', default='opengl',
        values=('opengl', 'mpe', 'none'),
        description='Build with specified graphics support')
    variant(
        'precision', default='mixed',
        values=('single', 'mixed', 'full'),
        description='single, mixed, or full double precision values')
    depends_on('mpi')
    depends_on('mpe', when='graphics=mpe')
    def cmake_args(self):
        """Translate the graphics/precision variants into CMake defines."""
        spec = self.spec
        # Graphics backend: OpenGL unless explicitly disabled or MPE requested.
        graphics_define = '-DGRAPHICS_TYPE=OpenGL'
        if 'graphics=none' in spec:
            graphics_define = '-DGRAPHICS_TYPE=None'
        elif 'graphics=mpe' in spec:
            graphics_define = '-DGRAPHICS_TYPE=MPE'
        # Floating-point precision: mixed unless single/full is requested.
        precision_define = '-DPRECISION_TYPE=mixed_precision'
        if 'precision=full' in spec:
            precision_define = '-DPRECISION_TYPE=full_precision'
        elif 'precision=single' in spec:
            precision_define = '-DPRECISION_TYPE=minimum_precision'
        # if MIC, then -DMIC_NATIVE=yes
        return [graphics_define, precision_define]
    def install(self, spec, prefix):
        """Copy docs and tests into the prefix, then run 'make install'."""
        for doc_file in ('README', 'LICENSE'):
            install(doc_file, prefix)
        for tree in ('docs', 'tests'):
            install_tree(tree, join_path(prefix, tree))
        with working_dir(self.build_directory):
            make('install')
|
lgpl-2.1
| 4,936,657,519,546,583,000
| 37.628205
| 78
| 0.629605
| false
| 4.001328
| false
| false
| false
|
gsnbng/erpnext
|
erpnext/hr/report/vehicle_expenses/vehicle_expenses.py
|
2
|
3137
|
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import erpnext
from frappe import _
from frappe.utils import flt,cstr
from erpnext.accounts.report.financial_statements import get_period_list
def execute(filters=None):
	"""Report entry point: return (columns, data, message, chart) for the
	fiscal year selected in *filters*. Everything stays empty when no
	fiscal year is chosen."""
	columns = []
	data = []
	chart = []
	fiscal_year = filters.get('fiscal_year')
	if fiscal_year:
		company = erpnext.get_default_company()
		# Monthly buckets spanning the selected fiscal year.
		period_list = get_period_list(fiscal_year, fiscal_year,
			'', '', 'Fiscal Year', 'Monthly', company=company)
		columns = get_columns()
		data = get_log_data(filters)
		chart = get_chart_data(data, period_list)
	return columns, data, None, chart
def get_columns():
	"""Return the report column definitions as ``label:fieldtype:width`` strings."""
	columns = [
		_("License") + ":Link/Vehicle:100",
		# Fix: the row dicts built by get_log_data() expose the vehicle make
		# under the key "Make" (SQL alias `vhcl.make as "Make"`); the previous
		# label _('Create') matched no data key and rendered an empty column.
		_("Make") + ":data:50",
		_("Model") + ":data:50", _("Location") + ":data:100",
		_("Log") + ":Link/Vehicle Log:100", _("Odometer") + ":Int:80",
		_("Date") + ":Date:100", _("Fuel Qty") + ":Float:80",
		_("Fuel Price") + ":Float:100", _("Fuel Expense") + ":Float:100",
		_("Service Expense") + ":Float:100"
	]
	return columns
def get_log_data(filters):
	"""Fetch submitted Vehicle Log rows for the selected fiscal year and
	attach the per-log service expense to each row."""
	fy = frappe.db.get_value('Fiscal Year', filters.get('fiscal_year'), ['year_start_date', 'year_end_date'], as_dict=True)
	# NOTE(review): `date between %s and %s` is not table-qualified — it
	# resolves to log.date here, but would break if tabVehicle gained a
	# `date` column; confirm and qualify if needed.
	data = frappe.db.sql("""select
			vhcl.license_plate as "License", vhcl.make as "Make", vhcl.model as "Model",
			vhcl.location as "Location", log.name as "Log", log.odometer as "Odometer",
			log.date as "Date", log.fuel_qty as "Fuel Qty", log.price as "Fuel Price",
			log.fuel_qty * log.price as "Fuel Expense"
		from
			`tabVehicle` vhcl,`tabVehicle Log` log
		where
			vhcl.license_plate = log.license_plate and log.docstatus = 1 and date between %s and %s
		order by date""" ,(fy.year_start_date, fy.year_end_date), as_dict=1)
	dl=list(data)
	# One extra query per row; acceptable for a report-sized result set.
	for row in dl:
		row["Service Expense"]= get_service_expense(row["Log"])
	return dl
def get_service_expense(logname):
	"""Return the summed service expense recorded against one Vehicle Log."""
	expense_amount = frappe.db.sql("""select sum(expense_amount)
		from `tabVehicle Log` log,`tabVehicle Service` ser
		where ser.parent=log.name and log.name=%s""",logname)
	if not expense_amount:
		return 0
	return flt(expense_amount[0][0])
def get_chart_data(data, period_list):
	"""Aggregate fuel and service expenses per period and build a line chart.

	:param data: row dicts from get_log_data() (keys "Date", "Fuel Expense",
		"Service Expense").
	:param period_list: period objects with .key, .from_date and .to_date.
	:returns: a frappe chart dict of type "line".
	"""
	# The original initialized several lists twice and accumulated
	# [key, total] pairs only to strip them back down to totals; this
	# builds the label and value series directly with identical output.
	labels = []
	fuel_exp_data = []
	service_exp_data = []
	for period in period_list:
		total_fuel_exp = 0
		total_ser_exp = 0
		for row in data:
			# Sum rows whose log date falls inside the current period.
			if row["Date"] <= period.to_date and row["Date"] >= period.from_date:
				total_fuel_exp += flt(row["Fuel Expense"])
				total_ser_exp += flt(row["Service Expense"])
		labels.append(period.key)
		fuel_exp_data.append(total_fuel_exp)
		service_exp_data.append(total_ser_exp)
	datasets = []
	if fuel_exp_data:
		datasets.append({
			'name': 'Fuel Expenses',
			'values': fuel_exp_data
		})
	if service_exp_data:
		datasets.append({
			'name': 'Service Expenses',
			'values': service_exp_data
		})
	chart = {
		"data": {
			'labels': labels,
			'datasets': datasets
		}
	}
	chart["type"] = "line"
	return chart
|
agpl-3.0
| -586,803,672,256,891,600
| 33.855556
| 120
| 0.671023
| false
| 2.815978
| false
| false
| false
|
droundy/deft
|
papers/histogram/figs/plot-convergence.py
|
1
|
5111
|
#!/usr/bin/python2
import matplotlib, sys
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
matplotlib.rc('text', usetex=True)
import styles
# Parse the six positional CLI arguments. The '#arg' comments are consumed
# by the project's figure-generation tooling to enumerate parameter values.
if len(sys.argv) != 7:
    print('useage: %s ww ff N min_T methods seed' % sys.argv[0])
    exit(1)
ww = float(sys.argv[1])
#arg ww = [1.3, 1.5, 2.0, 3.0]
ff = float(sys.argv[2])
#arg ff = [0.3]
N = float(sys.argv[3])
#arg N = range(5,21)
# NOTE(review): eval() on command-line input is unsafe for untrusted
# callers; acceptable only because this script is driven by the build system.
min_T = eval(sys.argv[4])
#arg min_T = [0.1]
methods = eval(sys.argv[5])
#arg methods = [["wang_landau","simple_flat","tmmc","oetmmc"]]
seed = int(sys.argv[6])
#arg seed = [0]
# input: ["data/s%03d/periodic-ww%04.2f-ff%04.2f-N%i-%s-conv_T%g-%s.dat" % (seed, ww, ff, N, method, min_T, data) for method in methods for data in ["E","lnw"]]
# Temperature grid: T_bins points in (0, max_T), excluding T=0.
max_T = 2
T_bins = 1e3
dT = max_T/T_bins
T_range = numpy.arange(dT, max_T, dT)
# make dictionaries which we can index by method name
U = {} # internal energy
CV = {} # heat capacity
S = {} # entropy
minlog = 0
# For each sampling method: load its energy histogram and weights, recover
# the (log) density of states, then compute U(T), C_V(T) and S(T) by
# Boltzmann-weighted sums over the energy axis.
for method in methods:
    e_hist = numpy.loadtxt("data/s%03d/periodic-ww%04.2f-ff%04.2f-N%i-%s-conv_T%g-E.dat"
                           % (seed, ww, ff, N, method, min_T), ndmin=2)
    lnw_hist = numpy.loadtxt("data/s%03d/periodic-ww%04.2f-ff%04.2f-N%i-%s-conv_T%g-lnw.dat"
                             % (seed, ww, ff, N, method, min_T), ndmin=2)
    energy = -e_hist[:, 0] # array of energies
    lnw = lnw_hist[e_hist[:, 0].astype(int), 1] # look up the lnw for each actual energy
    # ln(DoS) = ln(counts) - ln(weights): undo the sampling bias.
    ln_dos = numpy.log(e_hist[:, 1]) - lnw
    log10w = lnw_hist[e_hist[:, 0].astype(int), 1]*numpy.log10(numpy.exp(1))
    log10_dos = numpy.log10(e_hist[:, 1]) - log10w
    log10_dos -= log10_dos.max()
    # Track the global minimum so all methods share one y-range on the DoS plot.
    if log10_dos.min() < minlog:
        minlog = log10_dos.min()
    plt.figure('dos')
    plt.plot(energy, log10_dos, styles.dots(method), label=styles.title(method))
    Z = numpy.zeros(len(T_range)) # partition function
    U[method] = numpy.zeros(len(T_range)) # internal energy
    CV[method] = numpy.zeros(len(T_range)) # heat capacity
    S[method] = numpy.zeros(len(T_range)) # entropy
    # Infinite-temperature reference entropy, subtracted below so that
    # S -> 0 as T -> infinity.
    Z_inf = sum(numpy.exp(ln_dos - ln_dos.max()))
    S_inf = sum(-numpy.exp(ln_dos - ln_dos.max())*(-ln_dos.max() - numpy.log(Z_inf))) / Z_inf
    for i in range(len(T_range)):
        # Shift by the max before exponentiating to avoid overflow.
        ln_dos_boltz = ln_dos - energy/T_range[i]
        dos_boltz = numpy.exp(ln_dos_boltz - ln_dos_boltz.max())
        Z[i] = sum(dos_boltz)
        U[method][i] = sum(energy*dos_boltz)/Z[i]
        S[method][i] = sum(-dos_boltz*(-energy/T_range[i] - ln_dos_boltz.max() \
                                       - numpy.log(Z[i])))/Z[i]
        S[method][i] -= S_inf
        # C_V from the energy fluctuation formula: <(E/T)^2> - <E/T>^2.
        CV[method][i] = sum((energy/T_range[i])**2*dos_boltz)/Z[i] - \
                        (sum(energy/T_range[i]*dos_boltz)/Z[i])**2
    plt.figure('u')
    plt.plot(T_range, U[method]/N, styles.plot(method), label=styles.title(method))
    plt.figure('hc')
    plt.plot(T_range, CV[method]/N, styles.plot(method), label=styles.title(method))
    plt.figure('S')
    plt.plot(T_range, S[method]/N, styles.plot(method), label=styles.title(method))
# Switch back to the DoS figure and grab the auto-chosen tick locations so
# they can be relabeled as powers of ten below.
plt.figure('dos')
plt.ylim(minlog, 0)
locs, labels = plt.yticks()
def tentothe(n):
    """Format a log10 tick value *n* as a power-of-ten axis label."""
    if n == 0:
        return '1'
    if n == 10:
        # NOTE(review): this labels 10^10 as plain "10"; possibly intended
        # for n == 1 — confirm against the original plot.
        return '10'
    # Integers render with an integer exponent, everything else with %g.
    fmt = r'$10^{%d}$' if int(n) == n else r'$10^{%g}$'
    return fmt % n
# Relabel the DoS y-axis ticks as powers of ten and write the four figures.
newlabels = [tentothe(n) for n in locs]
plt.yticks(locs, newlabels)
plt.ylim(minlog, 0)
plt.xlabel('$U/N\epsilon$')
plt.ylabel('$DoS$')
plt.title('Density of states for $\lambda=%g$, $\eta=%g$, and $N=%i$'
          ' ($kT_{min}/\epsilon=%g$)' % (ww, ff, N, min_T))
plt.legend(loc='best')
plt.tight_layout(pad=0.2)
plt.savefig("figs/periodic-ww%02.0f-ff%02.0f-N%i-dos-conv-T%02.0f.pdf"
            % (ww*100, ff*100, N, min_T*100))
# Internal energy per particle vs temperature.
plt.figure('u')
plt.title('Specific internal energy for $\lambda=%g$, $\eta=%g$, and $N=%i$'
          ' ($kT_{min}/\epsilon=%g$)' % (ww, ff, N, min_T))
plt.xlabel('$kT/\epsilon$')
plt.ylabel('$U/N\epsilon$')
plt.legend(loc='best')
# Mark the minimum temperature the methods converged down to.
plt.axvline(min_T, linewidth=1, color='k', linestyle=':')
plt.tight_layout(pad=0.2)
plt.savefig("figs/periodic-ww%02.0f-ff%02.0f-N%i-u-conv-T%02.0f.pdf"
            % (ww*100, ff*100, N, min_T*100))
# Heat capacity per particle vs temperature.
plt.figure('hc')
plt.title('Specific heat capacity for $\lambda=%g$, $\eta=%g$, and $N=%i$'
          ' ($kT_{min}/\epsilon=%g$)' % (ww, ff, N, min_T))
plt.ylim(0)
plt.xlabel('$kT/\epsilon$')
plt.ylabel('$C_V/Nk$')
plt.legend(loc='best')
plt.axvline(min_T, linewidth=1, color='k', linestyle=':')
plt.tight_layout(pad=0.2)
plt.savefig("figs/periodic-ww%02.0f-ff%02.0f-N%i-hc-conv-T%02.0f.pdf"
            % (ww*100, ff*100, N, min_T*100))
# Configurational entropy per particle vs temperature.
plt.figure('S')
plt.title('Configurational entropy for $\lambda=%g$, $\eta=%g$, and $N=%i$'
          ' ($kT_{min}/\epsilon=%g$)' % (ww, ff, N, min_T))
plt.xlabel(r'$kT/\epsilon$')
plt.ylabel(r'$S_{\textit{config}}/Nk$')
plt.legend(loc='best')
plt.axvline(min_T, linewidth=1, color='k', linestyle=':')
plt.tight_layout(pad=0.2)
plt.savefig("figs/periodic-ww%02.0f-ff%02.0f-N%i-S-conv-T%02.0f.pdf"
            % (ww*100, ff*100, N, min_T*100))
|
gpl-2.0
| -6,103,449,264,001,766,000
| 32.847682
| 160
| 0.586774
| false
| 2.447797
| false
| false
| false
|
eshijia/magnum
|
magnum/api/controllers/v1/baymodel.py
|
1
|
14815
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import glanceclient.exc
import novaclient.exceptions as nova_exc
import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
from magnum.api.controllers import base
from magnum.api.controllers import link
from magnum.api.controllers.v1 import collection
from magnum.api.controllers.v1 import types
from magnum.api.controllers.v1 import utils as api_utils
from magnum.api import expose
from magnum.api import validation
from magnum.common import clients
from magnum.common import exception
from magnum.common import policy
from magnum import objects
class BayModelPatchType(types.JsonPatchType):
    """JSON PATCH document type for BayModel resources (no extra restrictions)."""
    pass
class BayModel(base.APIBase):
    """API representation of a baymodel.
    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a baymodel.
    """
    # Backing storage for the 'coe' wsproperty below.
    _coe = None
    def _get_coe(self):
        return self._coe
    def _set_coe(self, value):
        # Only store a real change; an explicit Unset clears the value.
        if value and self._coe != value:
            self._coe = value
        elif value == wtypes.Unset:
            self._coe = wtypes.Unset
    uuid = types.uuid
    """Unique UUID for this baymodel"""
    name = wtypes.StringType(min_length=1, max_length=255)
    """The name of the bay model"""
    coe = wsme.wsproperty(wtypes.text, _get_coe, _set_coe, mandatory=True)
    """The Container Orchestration Engine for this bay model"""
    image_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
                           mandatory=True)
    """The image name or UUID to use as a base image for this baymodel"""
    flavor_id = wtypes.StringType(min_length=1, max_length=255)
    """The flavor of this bay model"""
    master_flavor_id = wtypes.StringType(min_length=1, max_length=255)
    """The flavor of the master node for this bay model"""
    dns_nameserver = wtypes.IPv4AddressType()
    """The DNS nameserver address"""
    keypair_id = wsme.wsattr(wtypes.StringType(min_length=1, max_length=255),
                             mandatory=True)
    """The name or id of the nova ssh keypair"""
    external_network_id = wtypes.StringType(min_length=1, max_length=255)
    """The external network to attach the Bay"""
    fixed_network = wtypes.StringType(min_length=1, max_length=255)
    """The fixed network name to attach the Bay"""
    network_driver = wtypes.StringType(min_length=1, max_length=255)
    """The name of the driver used for instantiating container networks"""
    apiserver_port = wtypes.IntegerType(minimum=1024, maximum=65535)
    """The API server port for k8s"""
    docker_volume_size = wtypes.IntegerType(minimum=1)
    """The size in GB of the docker volume"""
    ssh_authorized_key = wtypes.StringType(min_length=1)
    """The SSH Authorized Key"""
    cluster_distro = wtypes.StringType(min_length=1, max_length=255)
    """The Cluster distro for the bay, ex - coreos, fedora-atomic."""
    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated baymodel links"""
    http_proxy = wtypes.StringType(min_length=1, max_length=255)
    """http_proxy for the bay """
    https_proxy = wtypes.StringType(min_length=1, max_length=255)
    """https_proxy for the bay """
    no_proxy = wtypes.StringType(min_length=1, max_length=255)
    """Its comma separated list of ip for which proxies should not
    used in the bay"""
    registry_enabled = wsme.wsattr(types.boolean, default=False)
    """Indicates whether the docker registry is enabled"""
    labels = wtypes.DictType(str, str)
    """One or more key/value pairs"""
    insecure = wsme.wsattr(types.boolean, default=False)
    """Indicates whether the TLS should be disabled"""
    def __init__(self, **kwargs):
        # Mirror only the DB-object fields that are also exposed as API
        # attributes; anything else in kwargs is silently ignored.
        self.fields = []
        for field in objects.BayModel.fields:
            # Skip fields we do not expose.
            if not hasattr(self, field):
                continue
            self.fields.append(field)
            setattr(self, field, kwargs.get(field, wtypes.Unset))
    @staticmethod
    def _convert_with_links(baymodel, url, expand=True):
        """Attach self/bookmark links; trim to a summary view when not expanded."""
        if not expand:
            baymodel.unset_fields_except(['uuid', 'name', 'image_id',
                                          'apiserver_port', 'coe'])
        baymodel.links = [link.Link.make_link('self', url,
                                              'baymodels', baymodel.uuid),
                          link.Link.make_link('bookmark', url,
                                              'baymodels', baymodel.uuid,
                                              bookmark=True)]
        return baymodel
    @classmethod
    def convert_with_links(cls, rpc_baymodel, expand=True):
        """Build an API BayModel from an RPC object, links included."""
        baymodel = BayModel(**rpc_baymodel.as_dict())
        return cls._convert_with_links(baymodel, pecan.request.host_url,
                                       expand)
    @classmethod
    def sample(cls, expand=True):
        """Return a fully-populated example baymodel for API documentation."""
        sample = cls(
            uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c',
            name='example',
            image_id='Fedora-k8s',
            flavor_id='m1.small',
            master_flavor_id='m1.small',
            dns_nameserver='8.8.1.1',
            keypair_id='keypair1',
            external_network_id='ffc44e4a-2319-4062-bce0-9ae1c38b05ba',
            fixed_network='private',
            network_driver='libnetwork',
            apiserver_port=8080,
            docker_volume_size=25,
            cluster_distro='fedora-atomic',
            ssh_authorized_key='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAB',
            coe='kubernetes',
            http_proxy='http://proxy.com:123',
            https_proxy='https://proxy.com:123',
            no_proxy='192.168.0.1,192.168.0.2,192.168.0.3',
            labels={'key1': 'val1', 'key2': 'val2'},
            created_at=datetime.datetime.utcnow(),
            updated_at=datetime.datetime.utcnow())
        return cls._convert_with_links(sample, 'http://localhost:9511', expand)
class BayModelCollection(collection.Collection):
    """API representation of a collection of baymodels."""
    baymodels = [BayModel]
    """A list containing baymodels objects"""
    def __init__(self, **kwargs):
        self._type = 'baymodels'
    @staticmethod
    def convert_with_links(rpc_baymodels, limit, url=None, expand=False,
                           **kwargs):
        """Wrap RPC baymodels into a linked, paginated collection."""
        # Local renamed from 'collection' to avoid shadowing the imported
        # 'collection' module.
        baymodel_collection = BayModelCollection()
        baymodel_collection.baymodels = [
            BayModel.convert_with_links(rpc_baymodel, expand)
            for rpc_baymodel in rpc_baymodels]
        baymodel_collection.next = baymodel_collection.get_next(
            limit, url=url, **kwargs)
        return baymodel_collection
    @classmethod
    def sample(cls):
        """Return an example collection holding one summary baymodel."""
        sample_collection = cls()
        sample_collection.baymodels = [BayModel.sample(expand=False)]
        return sample_collection
class BayModelsController(rest.RestController):
    """REST controller for BayModels."""
    _custom_actions = {
        'detail': ['GET'],
    }
    def _get_baymodels_collection(self, marker, limit,
                                  sort_key, sort_dir, expand=False,
                                  resource_url=None):
        # Shared pagination helper for get_all() and detail().
        limit = api_utils.validate_limit(limit)
        sort_dir = api_utils.validate_sort_dir(sort_dir)
        marker_obj = None
        if marker:
            marker_obj = objects.BayModel.get_by_uuid(pecan.request.context,
                                                      marker)
        baymodels = objects.BayModel.list(pecan.request.context, limit,
                                          marker_obj, sort_key=sort_key,
                                          sort_dir=sort_dir)
        return BayModelCollection.convert_with_links(baymodels, limit,
                                                     url=resource_url,
                                                     expand=expand,
                                                     sort_key=sort_key,
                                                     sort_dir=sort_dir)
    def _get_image_data(self, context, image_ident):
        """Retrieves os_distro and other metadata from the Glance image.
        :param image_ident: image id or name of baymodel.
        """
        try:
            cli = clients.OpenStackClients(context)
            return api_utils.get_openstack_resource(cli.glance().images,
                                                    image_ident, 'images')
        except glanceclient.exc.NotFound:
            raise exception.ImageNotFound(image_id=image_ident)
        except glanceclient.exc.HTTPForbidden:
            raise exception.ImageNotAuthorized(image_id=image_ident)
    @policy.enforce_wsgi("baymodel")
    @expose.expose(BayModelCollection, types.uuid,
                   types.uuid, int, wtypes.text, wtypes.text)
    def get_all(self, baymodel_uuid=None, marker=None, limit=None,
                sort_key='id', sort_dir='asc'):
        """Retrieve a list of baymodels.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        return self._get_baymodels_collection(marker, limit, sort_key,
                                              sort_dir)
    @policy.enforce_wsgi("baymodel")
    @expose.expose(BayModelCollection, types.uuid,
                   types.uuid, int, wtypes.text, wtypes.text)
    def detail(self, baymodel_uuid=None, marker=None, limit=None,
               sort_key='id', sort_dir='asc'):
        """Retrieve a list of baymodels with detail.
        :param baymodel_uuid: UUID of a baymodel, to get only baymodels for
               that baymodel.
        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        # NOTE(lucasagomes): /detail should only work agaist collections
        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "baymodels":
            raise exception.HTTPNotFound
        expand = True
        resource_url = '/'.join(['baymodels', 'detail'])
        return self._get_baymodels_collection(marker, limit,
                                              sort_key, sort_dir, expand,
                                              resource_url)
    @policy.enforce_wsgi("baymodel", "get")
    @expose.expose(BayModel, types.uuid_or_name)
    def get_one(self, baymodel_ident):
        """Retrieve information about the given baymodel.
        :param baymodel_ident: UUID or logical name of a baymodel.
        """
        rpc_baymodel = api_utils.get_rpc_resource('BayModel', baymodel_ident)
        return BayModel.convert_with_links(rpc_baymodel)
    def check_keypair_exists(self, context, keypair):
        """Checks the existence of the keypair"""
        cli = clients.OpenStackClients(context)
        try:
            cli.nova().keypairs.get(keypair)
        except nova_exc.NotFound:
            raise exception.KeyPairNotFound(keypair=keypair)
    @policy.enforce_wsgi("baymodel", "create")
    @expose.expose(BayModel, body=BayModel, status_code=201)
    @validation.enforce_network_driver_types('flannel')
    def post(self, baymodel):
        """Create a new baymodel.
        :param baymodel: a baymodel within the request body.
        """
        baymodel_dict = baymodel.as_dict()
        context = pecan.request.context
        # Validate the keypair before persisting anything.
        self.check_keypair_exists(context, baymodel_dict['keypair_id'])
        baymodel_dict['project_id'] = context.project_id
        baymodel_dict['user_id'] = context.user_id
        # cluster_distro is derived from the Glance image's os_distro
        # property rather than supplied by the caller.
        image_data = self._get_image_data(context, baymodel_dict['image_id'])
        if image_data.get('os_distro'):
            baymodel_dict['cluster_distro'] = image_data['os_distro']
        else:
            raise exception.OSDistroFieldNotFound(
                image_id=baymodel_dict['image_id'])
        new_baymodel = objects.BayModel(context, **baymodel_dict)
        new_baymodel.create()
        # Set the HTTP Location Header
        pecan.response.location = link.build_url('baymodels',
                                                 new_baymodel.uuid)
        return BayModel.convert_with_links(new_baymodel)
    @policy.enforce_wsgi("baymodel", "update")
    @wsme.validate(types.uuid, [BayModelPatchType])
    @expose.expose(BayModel, types.uuid, body=[BayModelPatchType])
    @validation.enforce_network_driver_types('flannel')
    def patch(self, baymodel_uuid, patch):
        """Update an existing baymodel.
        :param baymodel_uuid: UUID of a baymodel.
        :param patch: a json PATCH document to apply to this baymodel.
        """
        rpc_baymodel = objects.BayModel.get_by_uuid(pecan.request.context,
                                                    baymodel_uuid)
        try:
            baymodel_dict = rpc_baymodel.as_dict()
            baymodel = BayModel(**api_utils.apply_jsonpatch(
                baymodel_dict,
                patch))
        except api_utils.JSONPATCH_EXCEPTIONS as e:
            raise exception.PatchError(patch=patch, reason=e)
        # Update only the fields that have changed
        for field in objects.BayModel.fields:
            try:
                patch_val = getattr(baymodel, field)
            except AttributeError:
                # Ignore fields that aren't exposed in the API
                continue
            if patch_val == wtypes.Unset:
                patch_val = None
            if rpc_baymodel[field] != patch_val:
                rpc_baymodel[field] = patch_val
        rpc_baymodel.save()
        return BayModel.convert_with_links(rpc_baymodel)
    @policy.enforce_wsgi("baymodel")
    @expose.expose(None, types.uuid_or_name, status_code=204)
    def delete(self, baymodel_ident):
        """Delete a baymodel.
        :param baymodel_uuid: UUID or logical name of a baymodel.
        """
        rpc_baymodel = api_utils.get_rpc_resource('BayModel', baymodel_ident)
        rpc_baymodel.destroy()
|
apache-2.0
| -8,307,967,998,542,834,000
| 38.193122
| 79
| 0.60351
| false
| 4.007303
| false
| false
| false
|
nitish-tripathi/Simplery
|
ANN/Odin_10/Network.py
|
1
|
12776
|
"""
http://neuralnetworksanddeeplearning.com/chap1.html#implementing_our_network_to_classify_digits
http://numericinsight.com/uploads/A_Gentle_Introduction_to_Backpropagation.pdf
https://ayearofai.com/rohan-lenny-1-neural-networks-the-backpropagation-algorithm-explained-abf4609d4f9d
"""
#### Libraries
# Standard library
import sys
import random
import json
# Third-party libraries
import numpy as np
# Import inside Odin
from Cost import QuadraticCost, CrossEntropyCost
from Helpers import Helpers
class Network(object):
def __init__(self, model = None, sizes = None, eta = None, C = 0.0, cost = CrossEntropyCost, decrease_const = 0.0):
"""
Initializes artificial neural network classifier.
The biases and weights for the
network are initialized randomly, using a Gaussian
distribution with mean 0, and variance 1. Note that the first
layer is assumed to be an input layer, and by convention we
won't set any biases for those neurons, since biases are only
ever used in computing the outputs from later layers.
Parameters
---------
sizes: 1d array
Contains the number of neurons in the respective layers of
the network. For example, if the list was [2, 3, 1] then it
would be a three-layer network, with the first layer containing
2 neurons, the second layer 3 neurons, and the third layer 1 neuron.
eta: float
Learning rate
C: float
L2 parameterization. It is used to not allow the weights to become larger,
in order to avoid overfitting
cost: Cost class
Defines the cost calculation class, either CrossEntropyCost or
Quadratic cost
decrease_const : float (default: 0.0)
Decrease constant. Shrinks the learning rate
after each epoch via eta / (1 + epoch*decrease_const)
"""
if model != None:
self.load(model)
return
elif sizes == None:
raise NotImplementedError('Parameter sizes cannot be None')
return
np.random.seed()
self.sizes = sizes
self._num_layers = len(sizes)
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x)/np.sqrt(x)
for x, y in zip(sizes[:-1], sizes[1:])]
self._C = C
self._eta = eta
self._decrease_const = decrease_const
self.cost = cost
self.test_cost = []
def _feedforward(self, a):
"""Return the output of the network if ``a`` is input."""
for b, w in zip(self.biases, self.weights):
a = Helpers.sigmoid(np.dot(w, a)+b)
return a
def _feedforward2(self, a):
zs = []
activations = [a]
activation = a
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation) + b
zs.append(z)
activation = Helpers.sigmoid(z)
activations.append(activation)
return (zs, activations)
    def fit(self, training_data, epochs, mini_batch_size=1,
            test_data=None, calc_test_cost=False):
        """
        Fit the model to the training data.
        Train the neural network using mini-batch stochastic
        gradient descent.
        Parameters
        ---------
        training_data: list of tuples (X, y)
            X is input and y is desired output
        epochs: int
            Maximum number of iterations over the training dataset.
        mini_batch_size: int (default: 1)
            Divides training data into k minibatches for efficiency.
            Normal gradient descent learning if k=1 (default).
        test_data: list of tuples (X, y)
            If provided then the network will be evaluated against the
            test data after each epoch, and partial progress printed out.
            This is useful for tracking progress, but slows things down
            substantially.
        calc_test_cost: bool
            If True (and test_data given), record the test cost per epoch
            in ``self.test_cost``.
        """
        if test_data: n_test = len(test_data)
        n = len(training_data)
        self._nOut = training_data[0][1].shape[0]
        for j in xrange(epochs):
            # Adaptive learning rate
            # NOTE(review): dividing self._eta in place every epoch compounds
            # the decay (eta_j = eta_0 / prod(1 + c*i)), whereas the __init__
            # docstring promises eta / (1 + epoch*decrease_const) — confirm
            # which behavior is intended.
            self._eta /= (1 + self._decrease_const*j)
            # Randomly shuffling training data
            random.shuffle(training_data)
            # Partition training data into mini-batches of the appropriate size
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in xrange(0, n, mini_batch_size)]
            # Then for each mini_batch we apply a single step of gradient descent
            for mini_batch in mini_batches:
                #self._update_mini_batch_old(mini_batch, eta)
                self._update_mini_batch(mini_batch, n)
            if test_data:
                # NOTE(review): format() receives four arguments for three
                # placeholders — the _total_cost value is computed but never
                # printed; confirm whether it should appear in the message.
                print "Epoch {0}: {1} / {2}".format(
                    j, self.evaluate(test_data), n_test, self._total_cost(test_data, True))
                if calc_test_cost == True:
                    cost = self._total_cost(test_data, True)
                    self.test_cost.append(cost)
            else:
                #print "Epoch {0} complete".format(j)
                sys.stderr.write('\rEpoch: %d/%d' % (j+1, epochs))
                sys.stderr.flush()
        print ""
    def _update_mini_batch_old(self, mini_batch):
        """Legacy per-example SGD step, kept for reference.

        Superseded by the vectorized _update_mini_batch (which also applies
        L2 regularization); note this version applies none.
        """
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # Accumulate gradients one example at a time.
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self._backpropagation(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        # Plain gradient step averaged over the batch.
        self.weights = [w-(self._eta/len(mini_batch))*nw
                        for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(self._eta/len(mini_batch))*nb
                       for b, nb in zip(self.biases, nabla_b)]
    def _update_mini_batch(self, mini_batch, n):
        """Apply one SGD step (with L2 regularization) for a whole mini batch.

        The batch is packed column-wise into matrices so a single
        backpropagation pass handles every example at once.

        :param mini_batch: list of (x, y) training pairs.
        :param n: total size of the training set — needed to scale the
            L2 weight-decay term C/n.
        """
        batch_size = len(mini_batch)
        # transform to (input x batch_size) matrix
        x = np.asarray([_x.ravel() for _x, _y in mini_batch]).transpose()
        # transform to (output x batch_size) matrix
        y = np.asarray([_y.ravel() for _x, _y in mini_batch]).transpose()
        nabla_b, nabla_w = self._backpropagation(x, y)
        # Weight decay factor (1 - eta*C/n) implements L2 regularization.
        self.weights = [(1-self._eta*(self._C/n))*w - (self._eta / batch_size) * nw for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b - (self._eta / batch_size) * nb for b, nb in zip(self.biases, nabla_b)]
        return
def _backpropagation(self, x, y):
    """Return a tuple ``(nabla_b, nabla_w)`` representing the
    gradient for the cost function C_x.  ``nabla_b`` and
    ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
    to ``self.biases`` and ``self.weights``.

    ``x`` and ``y`` may carry a whole batch as columns (this is how
    ``_update_mini_batch`` calls it); the per-example bias gradients
    are summed over axis 1 below.
    """
    # Gradient accumulators, one slot per layer (filled in below).
    nabla_b = [0 for i in self.biases]
    nabla_w = [0 for i in self.weights]
    # feedforward: collect weighted inputs (zs) and activations per layer.
    zs, activations = self._feedforward2(x)
    # backward pass: output-layer error from the cost's delta rule.
    delta = self.cost.delta(zs[-1], activations[-1], y)
    #delta = self._cost_derivative(activations[-1], y) * Helpers.sigmoid_prime(zs[-1])
    # Sum over the batch dimension -> (n x 1) column of bias gradients.
    nabla_b[-1] = delta.sum(1).reshape([len(delta), 1]) # reshape to (n x 1) matrix
    nabla_w[-1] = np.dot(delta, activations[-2].transpose())
    # Propagate the error backwards; l counts layers from the end
    # (l = 2 is the second-to-last layer; negative indices do the rest).
    for l in xrange(2, self._num_layers):
        z = zs[-l]
        sp = Helpers.sigmoid_prime(z)
        delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
        nabla_b[-l] = delta.sum(1).reshape([len(delta), 1]) # reshape to (n x 1) matrix
        nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
    return (nabla_b, nabla_w)
def _backpropagation_old(self, x, y):
    """Single-example backpropagation (legacy; the batched
    ``_backpropagation`` above supersedes it).  Returns
    ``(nabla_b, nabla_w)``, layer-by-layer gradient lists shaped like
    ``self.biases`` and ``self.weights``.
    """
    nabla_b = [np.zeros(b.shape) for b in self.biases]
    nabla_w = [np.zeros(w.shape) for w in self.weights]
    # feedforward
    activation = x
    activations = [x] # list to store all the activations, layer by layer
    zs = [] # list to store all the z vectors, layer by layer
    for b, w in zip(self.biases, self.weights):
        z = np.dot(w, activation)+b
        zs.append(z)
        activation = Helpers.sigmoid(z)
        activations.append(activation)
    # backward pass: quadratic-cost derivative times sigmoid'.
    delta = self._cost_derivative(activations[-1], y) * \
        Helpers.sigmoid_prime(zs[-1])
    nabla_b[-1] = delta
    nabla_w[-1] = np.dot(delta, activations[-2].transpose())
    # Note that the variable l in the loop below is used a little
    # differently to the notation in Chapter 2 of the book.  Here,
    # l = 1 means the last layer of neurons, l = 2 is the
    # second-last layer, and so on.  It's a renumbering of the
    # scheme in the book, used here to take advantage of the fact
    # that Python can use negative indices in lists.
    for l in xrange(2, self._num_layers):
        z = zs[-l]
        sp = Helpers.sigmoid_prime(z)
        delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
        nabla_b[-l] = delta
        nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
    return (nabla_b, nabla_w)
def evaluate(self, test_data):
    """Count how many samples in ``test_data`` are classified correctly.

    The predicted class is the index of the most activated output
    neuron (``np.argmax`` over the feedforward result).

    Parameters
    ----------
    test_data: list of tuples (X, y)
        X is the network input, y the expected class index.
    """
    correct = 0
    for sample, label in test_data:
        prediction = np.argmax(self._feedforward(sample))
        correct += int(prediction == label)
    return correct
def _cost_derivative(self, output_activations, y):
"""Return the vector of partial derivatives \partial C_x /
\partial a for the output activations."""
return (output_activations-y)
def _total_cost(self, data, convert=False):
    """Average cost over ``data`` plus the L2 regularisation term.

    Set ``convert`` to True when ``data`` carries scalar labels
    (validation/test data) that must first be expanded to one-hot
    vectors via ``_vectorized_result``; leave it False for training
    data whose targets are already vectors.
    """
    n = len(data)
    total = 0.0
    for sample, target in data:
        output = self._feedforward(sample)
        if convert:
            target = self._vectorized_result(target)
        total += self.cost.fn(output, target) / n
    # L2 penalty: (C / 2n) * sum of squared weight norms.
    total += 0.5 * (self._C / n) * sum(
        np.linalg.norm(w) ** 2 for w in self.weights)
    return total
def _vectorized_result(self, j):
"""Return a 10-dimensional unit vector with a 1.0 in the j'th position
and zeroes elsewhere. This is used to convert a digit (0...9)
into a corresponding desired output from the neural network.
"""
if j <= self._nOut:
e = np.zeros((self._nOut, 1))
e[j] = 1.0
return e
else:
return j
def save(self, filename='model'):
    """Serialise the network (architecture, parameters, cost class name
    and hyper-parameters) to ``filename`` as JSON.

    The cost class is stored by name so ``load`` can resolve it again
    with ``getattr`` on this module.
    """
    data = {"sizes": self.sizes,
            "weights": [w.tolist() for w in self.weights],
            "biases": [b.tolist() for b in self.biases],
            "cost": str(self.cost.__name__),
            "eta": self._eta,
            "C": self._C}
    # Context manager guarantees the file is closed even if dump fails
    # (the original left the handle open on exceptions).
    with open(filename, "w") as f:
        json.dump(data, f)
def load(self, filename):
    """Restore a network previously written by ``save``.

    Reads the JSON file, resolves the cost class by name from this
    module, and rebuilds weights/biases as numpy arrays in place.
    """
    # Context manager instead of open/read/close so the handle is
    # released even if the JSON is malformed.
    with open(filename, "r") as f:
        data = json.load(f)
    # The cost was saved by class name; look it up in this module.
    self.cost = getattr(sys.modules[__name__], data["cost"])
    self.sizes = data["sizes"]
    self.weights = [np.array(w) for w in data["weights"]]
    self.biases = [np.array(b) for b in data["biases"]]
    self._eta = data["eta"]
    self._C = data["C"]
|
mit
| 369,818,851,461,477,000
| 37.251497
| 124
| 0.560269
| false
| 3.81829
| true
| false
| false
|
kevinjqiu/jirafs
|
jirafs/cmdline.py
|
1
|
4707
|
import argparse
import codecs
import logging
import os
import sys
import time
from blessings import Terminal
import six
from verlib import NormalizedVersion
from . import utils
from .exceptions import (
GitCommandError,
JiraInteractionFailed,
JirafsError,
NotTicketFolderException
)
# Write data to stdout as UTF-8 bytes when there's no encoding specified
if sys.version_info < (3, ) and sys.stdout.encoding is None:
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
logger = logging.getLogger(__name__)
def main():
    """Entry point for the ``jirafs`` command-line tool.

    Parses the sub-command and global flags, dispatches to the matching
    installed command class, and maps each failure mode to a distinct
    exit code: 10 git error, 20/21 not run inside a ticket folder,
    80 JIRA refusal, 90 generic Jirafs error.
    """
    term = Terminal()
    # Hard version floors for the interpreter and for git.
    if sys.version_info < (2, 7):
        raise RuntimeError(
            "Jirafs requires minimally version 2.7 of Python 2, or "
            "any version of Python 3. Please upgrade your version of "
            "python before using Jirafs."
        )
    if utils.get_git_version() < NormalizedVersion('1.8'):
        raise RuntimeError(
            "Jirafs requires minimally version 1.8 of Git. Please "
            "upgrade your version of git before using Jirafs."
        )
    commands = utils.get_installed_commands()
    parser = argparse.ArgumentParser(
        description='Edit Jira issues locally from your filesystem',
        add_help=False,
    )
    parser.add_argument(
        'command',
        type=six.text_type,
        choices=commands.keys()
    )
    parser.add_argument(
        '--subtasks',
        action='store_true',
        default=False
    )
    parser.add_argument(
        '--log-level',
        default=None,
        dest='log_level',
    )
    # parse_known_args: unknown options are forwarded to the sub-command.
    args, extra = parser.parse_known_args()
    if args.log_level is not None:
        logging.basicConfig(level=logging.getLevelName(args.log_level))
    command_name = args.command
    cmd_class = commands[command_name]
    # Subtasks
    if args.subtasks:
        cmd_class.RUN_FOR_SUBTASKS = True
    started = time.time()
    logger.debug(
        'Command %s(%s) started',
        command_name,
        extra
    )
    jira = utils.lazy_get_jira()
    try:
        cmd_class.execute_command(
            extra, jira=jira, path=os.getcwd(), command_name=command_name
        )
    except GitCommandError as e:
        # Surface the failed git command and its output, then exit 10.
        print(
            u"{t.red}Error (code: {code}) while running git "
            u"command.{t.normal}".format(
                t=term,
                code=e.returncode
            )
        )
        print("")
        print(u"{t.red}Command:{t.normal}{t.red}{t.bold}".format(t=term))
        print(u" {cmd}".format(cmd=e.command))
        print(u"{t.normal}".format(t=term))
        print(u"{t.red}Output:{t.normal}{t.red}{t.bold}".format(t=term))
        for line in e.output.decode('utf8').split('\n'):
            print(u" %s" % line)
        print(u"{t.normal}".format(t=term))
        sys.exit(10)
    except NotTicketFolderException:
        # Commands that opt into TRY_SUBFOLDERS are retried against every
        # immediate child of the current directory.
        if not getattr(cmd_class, 'TRY_SUBFOLDERS', False):
            print(
                u"{t.red}The command '{cmd}' must be ran from "
                u"within an issue folder.{t.normal}".format(
                    t=term,
                    cmd=command_name
                )
            )
            sys.exit(20)
        count_runs = 0
        for folder in os.listdir(os.getcwd()):
            try:
                cmd_class.execute_command(
                    extra,
                    jira=jira,
                    path=os.path.join(
                        os.getcwd(),
                        folder,
                    ),
                    command_name=command_name,
                )
                count_runs += 1
            except NotTicketFolderException:
                # Non-ticket children are silently skipped.
                pass
        if count_runs == 0:
            print(
                u"{t.red}The command '{cmd}' must be ran from "
                u"within an issue folder or from within a folder containing "
                u"issue folders.{t.normal}".format(
                    t=term,
                    cmd=command_name
                )
            )
            sys.exit(21)
    except JiraInteractionFailed as e:
        print(
            u"{t.red}JIRA was unable to satisfy your "
            u"request: {t.normal}{t.red}{t.bold}{error}{t.normal}".format(
                t=term,
                error=str(e)
            )
        )
        sys.exit(80)
    except JirafsError as e:
        print(
            u"{t.red}Jirafs encountered an error processing your "
            u"request: {t.normal}{t.red}{t.bold}{error}{t.normal}".format(
                t=term,
                error=str(e)
            )
        )
        sys.exit(90)
    logger.debug(
        'Command %s(%s) finished in %s seconds',
        command_name,
        extra,
        (time.time() - started)
    )
|
mit
| 8,821,853,478,044,909,000
| 27.70122
| 77
| 0.5273
| false
| 3.972152
| false
| false
| false
|
phamtrisi/metapp2
|
manage.py
|
1
|
2515
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import subprocess
from flask.ext.script import Manager, Shell, Server
from flask.ext.migrate import MigrateCommand
from metapp2.app import create_app
from metapp2.user.models import User
from metapp2.meeting.models import Meeting
from metapp2.meeting_user.models import Meeting_User
from metapp2.meeting_user_type.models import Meeting_User_Type
from metapp2.meeting_purpose.models import Meeting_Purpose
from metapp2.meeting_note.models import Meeting_Note
from metapp2.meeting_material.models import Meeting_Material
from metapp2.meeting_decision.models import Meeting_Decision
from metapp2.meeting_agenda.models import Meeting_Agenda
from metapp2.meeting_agenda_item.models import Meeting_Agenda_Item
from metapp2.meeting_agenda_item_user.models import Meeting_Agenda_Item_User
from metapp2.meeting_action_item.models import Meeting_Action_Item
from metapp2.meeting_action_item_user.models import Meeting_Action_Item_User
from metapp2.group.models import Group
from metapp2.group_user.models import Group_User
from metapp2.settings import DevConfig, ProdConfig
from metapp2.database import db
from metapp2 import app
# Absolute directory of this script; the pytest suite lives in ./tests.
HERE = os.path.abspath(os.path.dirname(__file__))
TEST_PATH = os.path.join(HERE, 'tests')
# Flask-Script manager that registers all CLI commands below.
manager = Manager(app)
def _make_context():
    """Return the default namespace for a ``manage.py shell`` session.

    Exposes the Flask app, the SQLAlchemy handle and all model classes
    so they are usable without manual imports.
    """
    return {
        'app': app,
        'db': db,
        'User': User,
        'Meeting': Meeting,
        'Meeting_Purpose': Meeting_Purpose,
        'Meeting_Note': Meeting_Note,
        'Meeting_Material': Meeting_Material,
        'Meeting_Decision': Meeting_Decision,
        'Meeting_Action_Item': Meeting_Action_Item,
        'Group': Group,
        'Group_User': Group_User,
        'Meeting_User': Meeting_User,
        'Meeting_User_Type': Meeting_User_Type,
        'Meeting_Action_Item_User': Meeting_Action_Item_User,
        'Meeting_Agenda': Meeting_Agenda,
        'Meeting_Agenda_Item': Meeting_Agenda_Item,
        'Meeting_Agenda_Item_User': Meeting_Agenda_Item_User,
    }
@manager.command
def test():
    """Run the project's pytest suite and return its exit code."""
    import pytest
    return pytest.main([TEST_PATH, '--verbose'])
@manager.command
def create_db():
    """Create every database table defined by the imported models."""
    db.create_all()
@manager.command
def drop_db():
    """Drop every table known to the SQLAlchemy metadata."""
    db.drop_all()
@manager.command
def run():
    """Serve the app on 0.0.0.0, honouring the PORT env var (default 5000)."""
    app.run(host='0.0.0.0', port=int(os.environ.get('PORT', 5000)))
# Register the interactive shell (with the preloaded context above) and
# the Flask-Migrate database commands.
manager.add_command('shell', Shell(make_context=_make_context))
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
bsd-3-clause
| -5,898,075,227,168,820,000
| 37.106061
| 550
| 0.754672
| false
| 3.104938
| true
| false
| false
|
mogillc/theo
|
software/TheoTest/motor.py
|
1
|
1505
|
import curses
import mraa
import time
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(1)
stdscr.addstr(0,0,"Press 'q' to quit\n")
stdscr.refresh()
# GPIO pin numbers per motor channel: (phase_pin, enable_pin).
_MOTOR_PINS = {0: (21, 0), 1: (20, 14)}

def _write_pin(pin, value):
    """Configure GPIO ``pin`` as an output and drive it to ``value``."""
    gpio = mraa.Gpio(pin)
    gpio.dir(mraa.DIR_OUT)
    gpio.write(value)

def set_motor(chan, en, phase):
    """Drive one motor channel via its phase (direction) and enable pins.

    The driver is configured for Enable/Phase mode by ``init_motor``.
    Channels other than 0 and 1 are ignored, matching the original
    behaviour.  (The duplicated GPIO setup of the original is factored
    into ``_write_pin``.)
    """
    if chan in _MOTOR_PINS:
        phase_pin, enable_pin = _MOTOR_PINS[chan]
        # Phase (direction) is written before the enable pin, as before.
        _write_pin(phase_pin, phase)
        _write_pin(enable_pin, en)
def init_motor():
    """Select Enable/Phase mode on the driver and stop both channels."""
    mode_pin = mraa.Gpio(31)
    mode_pin.dir(mraa.DIR_OUT)
    mode_pin.write(1)
    # setting motor to run in Enable/Phase mode
    for channel in (0, 1):
        set_motor(channel, 0, 0)
def move_forward():
    """Enable channel 0 with phase 0; disable channel 1."""
    set_motor(0, 1, 0)
    set_motor(1, 0, 0)
def move_backward():
    """Enable channel 0 with phase 1 (reversed); disable channel 1."""
    set_motor(0, 1, 1)
    set_motor(1, 0, 0)
def turn_left():
    """Disable channel 0; enable channel 1 with phase 0."""
    set_motor(0, 0, 0)
    set_motor(1, 1, 0)
def turn_right():
    """Disable channel 0; enable channel 1 with phase 1 (reversed)."""
    set_motor(0, 0, 0)
    set_motor(1, 1, 1)
def updateMotor(key):
    """Translate a keypress into a motor action and update the status line.

    WASD or the arrow keys steer; space stops.  Unrecognised keys are
    ignored.
    """
    # NOTE(review): "Righ" looks like a typo for "Right" -- kept as-is
    # because it is a user-visible string.
    dispatch = (
        ((ord('w'), curses.KEY_UP), "Forward ", move_forward),
        ((ord('s'), curses.KEY_DOWN), "Backward", move_backward),
        ((ord('a'), curses.KEY_LEFT), "Left ", turn_left),
        ((ord('d'), curses.KEY_RIGHT), "Righ ", turn_right),
        ((ord(' '),), "Stop ", init_motor),
    )
    for keys, label, action in dispatch:
        if key in keys:
            stdscr.addstr(1, 0, label)
            action()
            break
init_motor()
key = ''
# Main input loop: poll keypresses until 'q' is pressed.
while key != ord('q'):
    key = stdscr.getch()
    #stdscr.addch(1,2,key)
    #stdscr.refresh()
    updateMotor(key)
# Restore normal terminal behaviour before exiting.
curses.endwin()
|
apache-2.0
| -8,672,312,592,866,148,000
| 17.13253
| 46
| 0.642525
| false
| 2.134752
| false
| false
| false
|
Affirm/cabot
|
cabot/metricsapp/models/grafana.py
|
1
|
5474
|
import logging
import requests
import urlparse
from django.core.exceptions import ValidationError
from django.db import models
from cabot.metricsapp import defs
from cabot.metricsapp.api import get_series_ids, get_panel_url
logger = logging.getLogger(__name__)
class GrafanaInstance(models.Model):
    """A Grafana site plus the API credentials needed to query it."""

    class Meta:
        app_label = 'metricsapp'

    name = models.CharField(
        unique=True,
        max_length=30,
        help_text='Unique name for Grafana site.'
    )
    url = models.CharField(
        max_length=100,
        help_text='Url of Grafana site.'
    )
    api_key = models.CharField(
        max_length=100,
        help_text='Grafana API token for authentication (http://docs.grafana.org/http_api/auth/).'
    )
    sources = models.ManyToManyField(
        'MetricsSourceBase',
        through='GrafanaDataSource',
        help_text='Metrics sources used by this Grafana site.'
    )

    # Class-level cache of requests.Session objects keyed by API key, so
    # HTTP connections are reused across model instances.
    _sessions = dict()

    def __unicode__(self):
        return self.name

    def clean(self, *args, **kwargs):
        """Make sure the input url/api key work"""
        response = self.get_request('api/search')
        try:
            response.raise_for_status()
        # Bug fix: the module is ``requests.exceptions`` (plural);
        # ``requests.exception.HTTPError`` raised AttributeError instead
        # of catching the HTTP error.
        except requests.exceptions.HTTPError:
            raise ValidationError('Request to Grafana API failed.')

    @property
    def session(self):
        """A requests.session object with the correct authorization headers"""
        session = self._sessions.get(self.api_key)
        if session is None:
            session = requests.Session()
            session.headers.update({'Authorization': 'Bearer {}'.format(self.api_key)})
            self._sessions[self.api_key] = session
        return session

    def get_request(self, uri=''):
        """Make a request to the Grafana instance"""
        return self.session.get(urlparse.urljoin(self.url, uri), timeout=defs.GRAFANA_REQUEST_TIMEOUT_S)
class GrafanaDataSource(models.Model):
    """
    Intermediate model to match the name of a data source in a Grafana instance
    with the corresponding MetricsDataSource
    """
    class Meta:
        app_label = 'metricsapp'

    # Name the data source carries inside Grafana itself.
    grafana_source_name = models.CharField(
        max_length=30,
        help_text='The name for a data source in grafana (e.g. metrics-stage")'
    )
    grafana_instance = models.ForeignKey('GrafanaInstance', on_delete=models.CASCADE)
    metrics_source_base = models.ForeignKey('MetricsSourceBase', on_delete=models.CASCADE)

    def __unicode__(self):
        # Rendered as "<grafana name> (<metrics source name>, <instance name>)".
        return '{} ({}, {})'.format(self.grafana_source_name, self.metrics_source_base.name,
                                    self.grafana_instance.name)
class GrafanaPanel(models.Model):
    """
    Data about a Grafana panel.
    """
    class Meta:
        app_label = 'metricsapp'

    @property
    def modifiable_url(self):
        """Url with modifiable time range, dashboard link, etc"""
        if self.panel_url:
            # 'dashboard-solo' is the single-panel view; swapping to
            # 'dashboard' (plus &fullscreen) gives the editable view.
            return '{}&fullscreen'.format(self.panel_url.replace('dashboard-solo', 'dashboard'))
        return None

    def get_rendered_image(self):
        """Get a .png image of this panel"""
        # GrafanaInstance.get_request only takes the path
        panel_url = self.panel_url.replace(urlparse.urljoin(self.grafana_instance.url, '/'), '')
        rendered_image_url = urlparse.urljoin('render/', panel_url)
        rendered_image_url = '{}&width={}&height={}'.format(rendered_image_url,
                                                            defs.GRAFANA_RENDERED_IMAGE_WIDTH,
                                                            defs.GRAFANA_RENDERED_IMAGE_HEIGHT)
        # Unfortunately "$__all" works for the normal image but not render
        rendered_image_url = rendered_image_url.replace('$__all', 'All')
        try:
            image_request = self.grafana_instance.get_request(rendered_image_url)
            image_request.raise_for_status()
            return image_request.content
        except requests.exceptions.RequestException:
            # Rendering failures are logged and reported as "no image".
            logger.error('Failed to get Grafana panel image')
            return None

    grafana_instance = models.ForeignKey('GrafanaInstance', on_delete=models.CASCADE)
    dashboard_uri = models.CharField(max_length=100)
    panel_id = models.IntegerField()
    # series_ids comes from get_series_ids(); selected_series is the
    # '_'-joined session series list (see set_grafana_panel_from_session).
    series_ids = models.CharField(max_length=50)
    selected_series = models.CharField(max_length=50)
    panel_url = models.CharField(max_length=2500, null=True)
def build_grafana_panel_from_session(session):
    """Build and return an *unsaved* GrafanaPanel populated from the
    session -- suitable for rendering, or for saving to the DB by the
    caller."""
    panel = GrafanaPanel()
    set_grafana_panel_from_session(panel, session)
    return panel
def set_grafana_panel_from_session(grafana_panel, session):
    """
    Populate ``grafana_panel`` from the Grafana state held in ``session``.

    Only mutates the model in memory -- call ``grafana_panel.save()``
    yourself if you want to persist it.
    """
    instance = GrafanaInstance.objects.get(id=session['instance_id'])
    uri = session['dashboard_uri']
    grafana_panel.grafana_instance = instance
    grafana_panel.dashboard_uri = uri
    grafana_panel.panel_id = int(session['panel_id'])
    grafana_panel.series_ids = get_series_ids(session['panel_info'])
    grafana_panel.selected_series = '_'.join(session['series'])
    grafana_panel.panel_url = get_panel_url(
        instance.url, uri, session['panel_id'], session['templating_dict'])
|
mit
| -6,052,379,822,475,333,000
| 35.493333
| 107
| 0.647059
| false
| 3.879518
| false
| false
| false
|
qdzzyb2014/flask-weibo
|
app/forms.py
|
1
|
1550
|
from flask.ext.wtf import Form
from wtforms import TextField, BooleanField, TextAreaField, PasswordField
from wtforms.validators import Required, Length, Email
class LoginForm(Form):
    """Sign-in form: username and password plus a "remember me" toggle."""
    user_name = TextField('user_name', validators = [Required()])
    password = PasswordField('password', validators = [Required()])
    remember_me = BooleanField('remember_me', default = False)
class SignUpForm(Form):
    """Registration form; the e-mail field is validated for format."""
    user_name = TextField('user_name', validators = [Required()])
    password = PasswordField('password', validators = [Required()])
    user_email = TextField('user_email', validators = [Email(), Required()])
class EditForm(Form):
    """Profile-edit form with nickname-uniqueness validation."""
    nickname = TextField('nickname', validators = [Required()])
    about_me = TextAreaField('about_me', validators = [Length(min = 0, max = 140)])

    def __init__(self, original_nickname, *args, **kwargs):
        # Remember the pre-edit nickname so validate() can let the user
        # keep their current name without a uniqueness check.
        Form.__init__(self, *args, **kwargs)
        self.original_nickname = original_nickname

    def validate(self):
        """Run the standard validators, then enforce nickname uniqueness."""
        if not Form.validate(self):
            return False
        if self.nickname.data == self.original_nickname:
            return True
        # NOTE(review): ``User`` is not imported in this module's visible
        # imports -- this line would raise NameError; confirm the import.
        user = User.query.filter_by(nickname = self.nickname.data).first()
        if user != None:
            self.nickname.errors.append('This nickname is already in use. Please choose another one.')
            return False
        return True
class PostForm(Form):
    """Single-field form for submitting a new post."""
    post = TextField('post', validators = [Required()])
class SearchForm(Form):
    """Single-field form holding the search query string."""
    search = TextField('search', validators = [Required()])
|
bsd-3-clause
| -3,858,252,173,252,549,000
| 38.74359
| 102
| 0.652903
| false
| 4.189189
| false
| false
| false
|
QEF/postqe
|
postqe/bands.py
|
1
|
4666
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions to calculate the electronic band structure.
Note: no symmetry recognition is implemented yet.
"""
import numpy as np
from math import fabs, sqrt
from postqe.xmlfile import get_cell_data, get_calculation_data, get_band_strucure_data
from postqe.constants import ev_to_ry
def compute_bands(xmlfile, filebands='filebands', spin_component=''):
    """Extract the electronic band structure from a QE XML file.

    Writes a gnuplot-style bands file to ``filebands`` and returns
    ``(kpoints, bands)``: an (nks, 3) array of k-point coordinates and an
    (nks, nbnd) array of eigenvalues (scaled by ``nat / ev_to_ry`` as in
    the original implementation; spin-polarised values get an extra
    factor of 2).  For lsda calculations ``spin_component == 1`` selects
    the spin-up half of the bands, anything else the spin-down half.
    """
    ibrav, alat, a, b, nat, ntyp, atomic_positions, atomic_species = get_cell_data(xmlfile)
    prefix, outdir, ecutwfc, ecutrho, functional, lsda, noncolin, pseudodir, nr, nr_smooth = \
        get_calculation_data(xmlfile)
    nks, nbnd, ks_energies = get_band_strucure_data(xmlfile)
    kpoints = np.zeros((nks, 3))
    bands = np.zeros((nks, nbnd))
    # Context manager so the output file is always closed (the original
    # opened it and never closed it).
    with open(filebands, "w") as fout:
        fout.write("& plot nbnd = " + str(nbnd) + " nks = " + str(nks) + " /\n")
        if lsda:  # magnetic: eigenvalues are split into spin-up / spin-down halves
            for i in range(0, nks):
                kpoints[i] = ks_energies[i]['k_point']['$']
                fout.write(12 * ' ' + ' {:.6E}'.format(kpoints[i, 0]) + ' {:.6E}'.format(kpoints[i, 1])
                           + ' {:.6E}\n'.format(kpoints[i, 2]))
                if spin_component == 1:           # spin up: first half of the bands
                    band_range = range(0, nbnd // 2)
                else:                             # spin down: second half
                    band_range = range(nbnd // 2, nbnd)
                for j in band_range:
                    # eigenvalue at k-point i, band j (factor 2 for spin)
                    bands[i, j] = ks_energies[i]['eigenvalues'][j] * 2 * nat / ev_to_ry
                    fout.write(' {:.3E}'.format(bands[i, j]))
                fout.write('\n')
        else:  # non magnetic
            for i in range(0, nks):
                kpoints[i] = ks_energies[i]['k_point']['$']
                fout.write(12 * ' ' + ' {:.6E}'.format(kpoints[i, 0]) + ' {:.6E}'.format(kpoints[i, 1])
                           + ' {:.6E}\n'.format(kpoints[i, 2]))
                for j in range(0, nbnd):
                    bands[i, j] = ks_energies[i]['eigenvalues'][j] * nat / ev_to_ry  # eigenvalue at k-point i, band j
                    fout.write(' {:.3E}'.format(bands[i, j]))
                fout.write('\n')
    return kpoints, bands
def set_high_symmetry_points(kpoints):
    """
    Flag the "high symmetry" k-points: the two path end points, the Gamma
    point, and every point where the k-path changes direction.

    :param kpoints: (nks, 3) matrix of k-point coordinates.
    :return high_sym: boolean array of length nks, True for high-symmetry points.
    """
    nks = kpoints.shape[0]
    high_sym = np.full(nks, False, dtype=bool)
    # End points of the path are always high-symmetry points.
    high_sym[0] = True
    high_sym[nks - 1] = True
    for i in range(1, nks - 1):
        point = kpoints[i, :]
        if np.dot(point, point) < 1.e-9:
            # Gamma (the origin) is always a high symmetry point.
            high_sym[i] = True
            continue
        before = point - kpoints[i - 1, :]
        after = kpoints[i + 1, :] - point
        # Cosine of the angle between the two adjacent path segments.
        cos_angle = np.dot(before, after) / sqrt(np.dot(before, before)) / sqrt(np.dot(after, after))
        if fabs(cos_angle - 1.0) > 1.0e-4:
            # The path bends here -> boundary/corner point.
            high_sym[i] = True
    return high_sym
def compute_kx(kpoints):
    """
    "Linearise" the k-point path: return the cumulative distance along
    the path, used as the x coordinate for band-structure plots.

    A segment more than 5x longer than the previous one is treated as a
    jump between two disconnected lines in the Brillouin zone and is
    collapsed onto the same x position; near-zero segments advance x by
    the (tiny) distance without updating the jump reference.

    :param kpoints: (nks, 3) matrix of k-point coordinates, nks >= 2.
    :return kx: array of length nks with the linearised coordinates.
    """
    nks = kpoints.shape[0]
    kx = np.zeros(nks)
    # Seed the jump detector with the FIRST path segment.  Bug fix: the
    # original used kpoints[2] - kpoints[1], an off-by-one that crashed
    # for nks == 2 and mis-seeded the reference length by one segment.
    seg = kpoints[1, :] - kpoints[0, :]
    dxmod_save = sqrt(np.dot(seg, seg))
    for i in range(1, nks):
        seg = kpoints[i, :] - kpoints[i - 1, :]
        dxmod = sqrt(np.dot(seg, seg))
        if dxmod > 5 * dxmod_save:
            # Distant points belong to two different lines: plot them at
            # the same x position.
            kx[i] = kx[i - 1]
        elif dxmod > 1.e-5:
            # Usual case: consecutive points on the same path segment.
            kx[i] = kx[i - 1] + dxmod
            dxmod_save = dxmod
        else:
            # Nearly coincident points: advance by the tiny distance but
            # keep the previous reference length.
            kx[i] = kx[i - 1] + dxmod
    return kx
|
lgpl-2.1
| 3,263,816,706,830,989,300
| 39.224138
| 133
| 0.545649
| false
| 3.12107
| false
| false
| false
|
tompecina/legal
|
legal/uds/views.py
|
1
|
11475
|
# -*- coding: utf-8 -*-
#
# uds/views.py
#
# Copyright (C) 2011-19 Tomáš Pecina <tomas@pecina.cz>
#
# This file is part of legal.pecina.cz, a web-based toolbox for lawyers.
#
# This application is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from datetime import datetime
from csv import writer as csvwriter
from json import dump
from os.path import join
from django.shortcuts import redirect, HttpResponse
from django.views.decorators.http import require_http_methods
from django.views.decorators.gzip import gzip_page
from django.apps import apps
from django.urls import reverse
from django.http import QueryDict, Http404
from legal.common.glob import (
INERR, TEXT_OPTS_KEYS, REPO_URL, EXLIM_TITLE, FTLIM_TITLE, LOCAL_SUBDOMAIN, LOCAL_URL, DTF, ODP)
from legal.common.utils import Pager, new_xml, xml_decorate, LOGGER, render
from legal.uds.forms import MainForm
from legal.uds.models import Agenda, Document, DocumentIndex, File
APP = __package__.rpartition('.')[2]
APPVERSION = apps.get_app_config(APP).version
# Rows per page in the HTML listing.
BATCH = 50
REPO_PREFIX = join(REPO_URL, APP)
# Hard caps: EXLIM for XML/CSV/JSON exports, FTLIM for full-text paging;
# the assert below documents that the export cap must not be tighter.
EXLIM = 1000
FTLIM = 1000
assert FTLIM <= EXLIM
@require_http_methods(('GET', 'POST'))
def mainpage(request):
    """Search form page: GET renders the empty form, POST validates it
    and redirects to the list view matching the chosen output format."""
    LOGGER.debug('Main page accessed using method {}'.format(request.method), request, request.POST)
    err_message = ''
    page_title = apps.get_app_config(APP).verbose_name
    agendas = Agenda.objects.all().order_by('desc')
    if request.method == 'GET':
        form = MainForm()
        return render(
            request,
            'uds_mainpage.xhtml',
            {'app': APP,
             'page_title': page_title,
             'err_message': err_message,
             'agendas': agendas,
             'form': form})
    form = MainForm(request.POST)
    if form.is_valid():
        cld = form.cleaned_data
        # Copy the non-empty form fields into the redirect query string.
        query = QueryDict(mutable=True)
        for key in cld:
            if cld[key]:
                query[key] = cld[key]
        query['start'] = 0
        # 'format' selects the target view (html/xml/csv/json list), it
        # is not itself a filter parameter.
        del query['format']
        return redirect('{}?{}'.format(reverse('{}:{}list'.format(APP, cld['format'])), query.urlencode()))
    err_message = INERR
    LOGGER.debug('Invalid form', request)
    return render(
        request,
        'uds_mainpage.xhtml',
        {'app': APP,
         'page_title': page_title,
         'err_message': err_message,
         'agendas': agendas,
         'form': form})
def g2p(reqd):
    """Translate GET parameters into Django ORM filter kwargs.

    Integer fields are validated against per-field lower bounds (an
    AssertionError propagates to the caller, which maps it to HTTP 404);
    the two date parameters become a half-open ``posted`` interval.
    """
    par = {}
    if 'publisher' in reqd:
        par['publisher_id'] = reqd['publisher']
    # Minimum legal value for each integer query field.
    minima = {
        'senate': 0,
        'number': 1,
        'year': 1970,
        'page': 1,
        'agenda': 1,
        'id': 1,
    }
    for field, minimum in minima.items():
        if field in reqd:
            value = int(reqd[field])
            assert value >= minimum
            par[field] = value
    if 'register' in reqd:
        par['register'] = reqd['register'].upper()
    if 'date_posted_from' in reqd:
        par['posted__gte'] = datetime.strptime(reqd['date_posted_from'], DTF).date()
    if 'date_posted_to' in reqd:
        # Exclusive upper bound: one day past the requested date.
        par['posted__lt'] = datetime.strptime(reqd['date_posted_to'], DTF).date() + ODP
    if 'text' in reqd:
        par['text__search'] = reqd['text']
    return par
@require_http_methods(('GET',))
def htmllist(request):
    """Paged HTML search results.

    Builds ORM filters from the GET parameters via ``g2p`` and pages the
    results BATCH rows at a time; any failure while parsing parameters
    or querying is mapped to HTTP 404.
    """
    LOGGER.debug('HTML list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        start = int(reqd['start']) if 'start' in reqd else 0
        assert start >= 0
        # Primary lookup goes through the Sphinx full-text index.
        docins = DocumentIndex.objects.using('sphinx').filter(**par).order_by('-posted', 'id')
        total = docins.count()
        if total and start >= total:
            start = total - 1
        if start >= FTLIM:
            # Deep pages are not served from the index: refuse full-text
            # queries past the limit...
            if 'text' in reqd:
                return render(
                    request,
                    'ftlim.xhtml',
                    {'app': APP,
                     'page_title': FTLIM_TITLE,
                     'limit': FTLIM,
                     'back': reverse('uds:mainpage')})
            # ...and fall back to a plain database query otherwise.
            docs = Document.objects.filter(**par).order_by('-posted', 'id').distinct()
            total = docs.count()
            if total and start >= total:
                start = total - 1
            docs = docs[start:(start + BATCH)]
        else:
            docins = list(docins[start:(start + BATCH)].values_list('id', flat=True))
            docs = Document.objects.filter(id__in=docins).order_by('-posted', 'id').distinct()
        for doc in docs:
            doc.files = File.objects.filter(document=doc).order_by('fileid').distinct()
            idx = 1
            # brk asks the template to break the file list after every
            # fifth item.
            for file in doc.files:
                file.brk = idx % 5 == 0
                idx += 1
    except:
        # NOTE(review): bare except maps *any* failure (including
        # programming errors) to 404 -- consider narrowing.
        raise Http404
    return render(
        request,
        'uds_list.xhtml',
        {'app': APP,
         'page_title': 'Výsledky vyhledávání',
         'rows': docs,
         'pager': Pager(start, total, reverse('uds:htmllist'), reqd, BATCH),
         'total': total,
         'noindex': True})
@gzip_page
@require_http_methods(('GET',))
def xmllist(request):
    """XML export of the matching documents (refused above EXLIM rows)."""
    LOGGER.debug('XML list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        resins = DocumentIndex.objects.using('sphinx').filter(**par).order_by('posted', 'id')
    except:
        # NOTE(review): bare except maps any failure to 404.
        raise Http404
    total = resins.count()
    if total > EXLIM:
        # Over-sized exports get an explanatory page instead of a file.
        return render(
            request,
            'exlim.xhtml',
            {'app': APP,
             'page_title': EXLIM_TITLE,
             'limit': EXLIM,
             'total': total,
             'back': reverse('uds:mainpage')})
    resins = list(resins.values_list('id', flat=True))
    res = Document.objects.filter(id__in=resins).order_by('posted', 'id').distinct()
    # Root-element attributes: namespace, schema location and provenance.
    doc = {
        'documents': {
            'xmlns': 'http://' + LOCAL_SUBDOMAIN,
            'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',
            'xsi:schemaLocation': 'http://{} {}/static/{}-{}.xsd'.format(LOCAL_SUBDOMAIN, LOCAL_URL, APP, APPVERSION),
            'application': APP,
            'version': APPVERSION,
            'created': datetime.now().replace(microsecond=0).isoformat()
        }
    }
    xml = new_xml('')
    tag_documents = xml_decorate(xml.new_tag('documents'), doc)
    xml.append(tag_documents)
    # One <document> element per row, with its <files> listed inside.
    for item in res:
        tag_document = xml.new_tag('document')
        tag_documents.append(tag_document)
        tag_document['id'] = item.docid
        tag_publisher = xml.new_tag('publisher')
        tag_document.append(tag_publisher)
        tag_publisher['id'] = item.publisher.pubid
        tag_publisher.append(item.publisher.name)
        tag_ref = xml.new_tag('ref')
        tag_document.append(tag_ref)
        tag_ref.append(item.ref)
        tag_description = xml.new_tag('description')
        tag_document.append(tag_description)
        tag_description.append(item.desc)
        tag_agenda = xml.new_tag('agenda')
        tag_document.append(tag_agenda)
        tag_agenda.append(item.agenda.desc)
        tag_posted = xml.new_tag('posted')
        tag_document.append(tag_posted)
        tag_posted.append(item.posted.isoformat())
        tag_files = xml.new_tag('files')
        tag_document.append(tag_files)
        for fil in File.objects.filter(document=item).order_by('fileid').distinct():
            tag_file = xml.new_tag('file')
            tag_files.append(tag_file)
            tag_file['id'] = fil.fileid
            tag_name = xml.new_tag('name')
            tag_file.append(tag_name)
            tag_name.append(fil.name)
            tag_url = xml.new_tag('url')
            tag_file.append(tag_url)
            tag_url.append(join(REPO_PREFIX, str(fil.fileid), fil.name))
    response = HttpResponse(
        str(xml).encode('utf-8') + b'\n',
        content_type='text/xml; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=Dokumenty.xml'
    return response
@gzip_page
@require_http_methods(('GET',))
def csvlist(request):
    """CSV export of the matching documents (refused above EXLIM rows)."""
    LOGGER.debug('CSV list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        resins = DocumentIndex.objects.using('sphinx').filter(**par).order_by('posted', 'id')
    except:
        # NOTE(review): bare except maps any failure to 404.
        raise Http404
    total = resins.count()
    if total > EXLIM:
        return render(
            request,
            'exlim.xhtml',
            {'app': APP,
             'page_title': EXLIM_TITLE,
             'limit': EXLIM,
             'total': total,
             'back': reverse('uds:mainpage')})
    resins = list(resins.values_list('id', flat=True))
    res = Document.objects.filter(id__in=resins).order_by('posted', 'id').distinct()
    # The csv writer streams straight into the HTTP response object.
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=Dokumenty.csv'
    writer = csvwriter(response)
    hdr = (
        'Datum vyvěšení',
        'Soud/státní zastupitelství',
        'Popis dokumentu',
        'Spisová značka/číslo jednací',
        'Agenda',
        'Soubory',
    )
    writer.writerow(hdr)
    for item in res:
        files = File.objects.filter(document=item).order_by('fileid').distinct()
        dat = (
            '{:%d.%m.%Y}'.format(item.posted),
            item.publisher.name,
            item.desc,
            item.ref,
            item.agenda.desc,
            # All attachment URLs for the row, joined with semicolons.
            ';'.join([join(REPO_PREFIX, str(fil.fileid), fil.name) for fil in files]),
        )
        writer.writerow(dat)
    return response
@gzip_page
@require_http_methods(('GET',))
def jsonlist(request):
    """JSON export of the matching documents (refused above EXLIM rows)."""
    LOGGER.debug('JSON list accessed', request, request.GET)
    reqd = request.GET.copy()
    try:
        par = g2p(reqd)
        resins = DocumentIndex.objects.using('sphinx').filter(**par).order_by('posted', 'id')
    except:
        # NOTE(review): bare except maps any failure to 404.
        raise Http404
    total = resins.count()
    if total > EXLIM:
        return render(
            request,
            'exlim.xhtml',
            {'app': APP,
             'page_title': EXLIM_TITLE,
             'limit': EXLIM,
             'total': total,
             'back': reverse('uds:mainpage')})
    resins = list(resins.values_list('id', flat=True))
    res = Document.objects.filter(id__in=resins).order_by('posted', 'id').distinct()
    response = HttpResponse(content_type='application/json; charset=utf-8')
    response['Content-Disposition'] = 'attachment; filename=Dokumenty.json'
    # One dict per document, with its attachments nested under 'files'.
    lst = []
    for item in res:
        files = File.objects.filter(document=item).order_by('fileid').distinct()
        lst.append({
            'posted': item.posted.isoformat(),
            'publisher': item.publisher.name,
            'desc': item.desc,
            'ref': item.ref,
            'agenda': item.agenda.desc,
            'files': [{
                'id': f.fileid,
                'name': f.name,
                'url': join(REPO_PREFIX, str(f.fileid), f.name)}
                for f in files],
        })
    # Serialise directly into the response body.
    dump(lst, response)
    return response
|
gpl-3.0
| 4,609,566,359,525,702,000
| 32.601173
| 118
| 0.575929
| false
| 3.571696
| false
| false
| false
|
axiros/transcrypt
|
make.py
|
1
|
1049
|
#!/usr/bin/env python
# Python 2 script: watches show.markdown and regenerates the reveal.js
# slide deck whenever the file changes.
print '''
we require an index.html.tmpl next to us in this folder, into which we put the content
of pandoc generated raw show.html, then write index.html with the result.
'''
from time import sleep
import os
if __name__ == '__main__':
    oldstat = 0
    print 'looping, checking changes of show.markdown'
    # Poll the file's stat result once per second; any change (mtime,
    # size, ...) triggers a rebuild.
    while True:
        stat = os.stat('./show.markdown')
        if stat == oldstat:
            sleep(1)
            continue
        oldstat = stat
        os.system('pandoc show.markdown -o show.html -s -V "theme:black" -t revealjs')
        # now take a hammer: splice pandoc's <title> and <body> content
        # into the template via plain string surgery.
        t = open('./index.html.tmpl').read()
        with open('./show.html') as fd:
            s = fd.read()
        title = s.split('<title>', 1)[1].split('</title')[0]
        body = s.split('<body>', 1)[1].split('<script ')[0]
        t = t.replace('_TITLE_', title).replace('_CONTENT_', body)
        open('./index.html', 'w').write(t)
        # Trigger a browser refresh via the helper script.
        os.system('./safari_reload.sh')
        #os.system('hg addremove * && hg commit -m "`date`" &')
|
apache-2.0
| 3,437,461,641,889,378,300
| 32.83871
| 87
| 0.561487
| false
| 3.462046
| false
| false
| false
|
gibbon-joel/metahive
|
bin/import-to-hive.py
|
1
|
28520
|
#!/usr/bin/python
import os, sys
import hashlib
import MySQLdb
import MySQLdb.cursors
from datetime import datetime
import time
import shutil
import magic
import argparse
import re
sys.path.append('%s/../lib' %(os.path.dirname(__file__)))
import metahivesettings.settings
#from metahive.scanners mport *
import metahive.scanners
# Discover metadata-scanner plugins: each plugin in metahive.scanners
# exposes register() returning the MIME types it can handle.
regScan = {}              # plugin name -> plugin module
scannersByMimetype = {}   # mimetype -> [plugin names]
for name in metahive.scanners.__all__:
    plugin = getattr(metahive.scanners, name)
    try:
        register_plugin = plugin.register
    except AttributeError:
        print "Plugin %s does not have a register() function" %(name)
        pass
    else:
        supported_mimetypes = register_plugin()
        for mimetype in supported_mimetypes:
            if mimetype not in scannersByMimetype:
                scannersByMimetype[mimetype] = []
            scannersByMimetype[mimetype].append(name)
        regScan[name] = plugin
db_credentials = metahivesettings.settings.db_credentials()
repoDir = metahivesettings.settings.repo_dir()
#print registeredScanners
#print scannersByMimetype
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--sourcedir", help="Top level directory to work on (e.g. /path/to/upload/folder", required=True)
parser.add_argument("-v", "--verbose", help="Be verbose (more debug output)", required=False, default=False, action='store_true')
parser.add_argument("-c", "--copy-to-repo", help="copy scanned supported files into the media repository", required=False, default=False, action='store_true', dest='copy_to_repo')
parser.add_argument("-d", "--delete-original", help="delete original/duplicate files if we have a copy in the media repository", required=False, default=False, action='store_true', dest='delete_original')
args = parser.parse_args()
# Refuse --copy-to-repo when the config has no repository directory.
if args.copy_to_repo and not repoDir:
    print "repository directory is not set in config's [repository] section - cannot copy to repo'"
    sys.exit(2)
# libmagic handle used by getMimeType(); loaded once at startup
m=magic.open(magic.MAGIC_MIME_TYPE)
m.load()
def hash_file(filename, hashtype='sha1'):
    """Return the hex digest of *filename* using *hashtype*.

    Reads the file in 1 MiB chunks so arbitrarily large files can be
    hashed without loading them into memory.

    Bug fixed: the original only implemented hashtype == 'sha1' and
    raised UnboundLocalError on ``hexhash`` for any other value; now
    any algorithm known to hashlib is accepted (ValueError for an
    unsupported name).
    """
    BUF_SIZE = 1024 * 1024  # incremental 1 MiB blocks
    hasher = hashlib.new(hashtype)
    with open(filename, 'rb') as f:
        while True:
            data = f.read(BUF_SIZE)
            if not data:
                break
            hasher.update(data)
    return hasher.hexdigest()
def makePathFromHash(hash):
    """Fan a content hash out into a three-level directory path.

    e.g. '2ef94a0e9a4ef32fda6e10b83b1e698036b726f1' -> '2/e/f', which
    keeps any single repository directory from accumulating too many
    entries.
    """
    first, second, third = hash[0], hash[1], hash[2]
    return '{0}/{1}/{2}'.format(first, second, third)
def getMimeType(filename):
    """Return the MIME type of *filename* via the module-level libmagic
    handle ``m``, or False if libmagic raised."""
    try:
        result = m.file(filename)
    except Exception as e:
        result = False
        print repr(e)
    return result
def gatherBasicInfo(filenames_array):
    """
    Collect "basic" OS-level information (ctime, mtime, size) for a list
    of filenames.

    input:  [ '/path/to/file/1.jpg', '/path/to/file/2.jpg' ]
    output: { '/path/to/file/1.jpg': {'ctime': ..., 'mtime': ..., 'size': ...}, ... }

    Files that cannot be stat()ed are reported and silently omitted from
    the result.
    """
    begin = time.time()
    fileInfo = {}
    for filename in filenames_array:
        try:
            info = os.stat(filename)
        # NOTE(review): bare except also swallows KeyboardInterrupt
        except:
            print "Could not stat file '%s'" %(filename)
        else:
            file_mtime = datetime.fromtimestamp(info.st_mtime)
            file_ctime = datetime.fromtimestamp(info.st_ctime)
            fileInfo[filename] = {
                'ctime': file_ctime,
                'mtime': file_mtime,
                'size': info.st_size,
            }
    finish = time.time()
    time_taken = finish - begin
    # NOTE(review): divides by elapsed time; could be ZeroDivisionError on
    # a very fast run with a coarse clock
    files_per_second = len(filenames_array) / float(time_taken)
    print "It took %0.2f seconds to gather basic info for %i files (%0.1f files per second)" %(time_taken, len(filenames_array), files_per_second)
    return fileInfo
def getRepoStateForFiles(filenames_dict):
    """
    Expects a dict of dicts (essentially, the output of "gatherBasicInfo"). Constructs SQL to check
    which of the files (if any) we have already in the database.

    Returns [notKnown, known]: two dicts keyed by sha1 hash with
    size/name/ctime/mtime/id/is_in_repo entries.  Mutates *filenames_dict*
    by adding a 'hash.sha1' key per file.  Uses module globals ``c`` (DB
    cursor) and ``db`` (connection).
    """
    """
    As we do not want to hash everything again if it's known but not stored in the repo, we will
    rely on os.stat + filename as a rough initial check, only hashing if we do not find an exact match...
    """
    original_filenames_by_hash = {}
    # Phase 1: resolve a sha1 for every file - from the DB when the
    # (size, ctime, mtime, name) tuple matches exactly, otherwise by hashing.
    for filename, filedata in filenames_dict.iteritems():
        sql = "SELECT id, sha1, file_size, original_ctime as ctime, original_mtime as mtime, is_in_repo FROM files WHERE file_size=%s and original_ctime=%s and original_mtime=%s and original_filename=%s"
        numHits = c.execute ( sql, [ filedata['size'], filedata['ctime'], filedata['mtime'], filename ] )
        if numHits > 0:
            if numHits > 1:
                #print "AAAARGH - file %s found more than once in the database - this should never happen" %(filename)
                # NOTE(review): format string below has a %s but no argument
                print "<5> More than one hit for %s found in DB, cannot use hash from db, hashing live..."
                filenames_dict[filename]['hash.sha1'] = hash_file(filename)
            else:
                row = c.fetchone()
                print "<6> Exactly one match for stat-params for %s found in DB, using hash %s from DB" %(filename, row['sha1'])
                filenames_dict[filename]['hash.sha1'] = row['sha1']
        else:
            print "<6> File %s not known yet by name/size/mtime - hash it" %(filename)
            myhash = hash_file(filename)
            filenames_dict[filename]['hash.sha1'] = myhash
            original_filenames_by_hash[myhash] = filename
    hash_lookup = {}
    #hash_lookup['463699b9bc849c94e0f45ff2f21b171d2d128bec'] = {'size': 0, 'name': 'undefined name'}
    for filename, filedata in filenames_dict.iteritems():
        #print filedata
        hash_lookup[filedata['hash.sha1']] = { 'size': filedata['size'], 'name': filename }
    # Phase 2: one bulk lookup of all hashes.
    # I want to create SQL of the form 'SELECT id, filesize FROM files WHERE hash IN ( hash1, hash2, hash3, ... )'
    # then compare hash & filesizes
    placeholders = ', '.join(['%s'] * len(hash_lookup))
    sql = 'SELECT * FROM files WHERE sha1 IN (%s)' %(placeholders)
    #print sql
    #print hash_lookup.keys()
    c.execute( sql, hash_lookup.keys() )
    rows = c.fetchall()
    # ({'sha1': '463699b9bc849c94e0f45ff2f21b171d2d128bec', 'id': 284L, 'file_size': None},)
    known = {}
    #print "******"
    #print "Original filenames by hash:"
    #print original_filenames_by_hash
    #print "******"
    for row in rows:
        if row['sha1'] in hash_lookup and 'name' in hash_lookup[row['sha1']]:
            print "%s: %s " %(row['sha1'], hash_lookup[row['sha1']])
            myhash = row['sha1']
            filename = hash_lookup[myhash]['name']
            print "%s: %s " %(myhash, filename)
            #if myhash in original_filenames_by_hash and filename != original_filenames_by_hash[myhash]:
            if filename != row['original_filename']:
                # file is known by a different original name in DB
                print "<5> Found new original name %s for the known file %s (%s)" %(row['original_filename'], myhash, filename)
                existing_original_name = c.execute('SELECT * FROM original_filenames WHERE file_id=%s AND original_filename=%s', [row['id'], filename])
                if existing_original_name < 1:
                    c.execute('INSERT INTO original_filenames (file_id, original_filename) VALUES (%s, %s)', [row['id'], filename])
                    print "<6> Alternate name %s for %s added to DB" %(filename, myhash)
                else:
                    print "<7> Alternate name %s already known for %s" %(filename, myhash)
        else:
            # NOTE(review): with this placeholder name the filenames_dict
            # lookups below would raise KeyError - verify this branch is
            # actually reachable
            filename = 'unknown filename'
        known[row['sha1']] = {
            'size': row['file_size'],
            'name': filename,
            'ctime': filenames_dict[filename]['ctime'],
            'mtime': filenames_dict[filename]['mtime'],
            'id': row['id'],
            'is_in_repo': row['is_in_repo']
        }
    db.commit() # for any original_filenames changes
    # Phase 3: everything hashed but not matched above is "not known".
    notKnown = {}
    for hashvalue, value in hash_lookup.iteritems():
        if hashvalue not in known:
            notKnown[hashvalue] = {
                'size': filenames_dict[value['name']]['size'],
                'name': value['name'],
                'ctime': filenames_dict[value['name']]['ctime'],
                'mtime': filenames_dict[value['name']]['mtime'],
                'id': None,
                'is_in_repo': False
            }
    #diffkeys = set(hash_lookup) - set(known)
    #print hash_lookup
    #print known
    #print diffkeys
    #print notKnown
    #print rows
    return [ notKnown, known ]
def addFileIntoDB ( filehash, mimetype, extraInfo ):
    """
    takes a hash and the "extraInfo" dict with ctime, mtime, size, name and is_in_repo values, then tries to add it into the db.
    Returns False on failure or the insert_id on success.

    Uses the module-level cursor ``c``; the caller is responsible for
    committing the transaction.
    """
    # example extraInfo:
    # f7bef5ce2781d8667f2ed85eac4627d532d32222, {'is_in_repo': False, 'ctime': datetime.datetime(2015, 10, 14, 19, 1, 52, 418553), 'mtime': datetime.datetime(2015, 4, 26, 14, 24, 26), 'size': 2628630, 'id': None, 'name': '/treasure/media-throwaway/temp/upload/foobar/IMG_6344.JPG'}
    sql = """INSERT INTO files SET
                is_in_repo = %s,
                original_filename = %s,
                type = %s,
                sha1 = %s,
                file_size = %s,
                original_mtime = %s,
                original_ctime = %s
            """
    try:
        affected = c.execute(sql, [ extraInfo['is_in_repo'], extraInfo['name'], mimetype, filehash, extraInfo['size'], extraInfo['mtime'], extraInfo['ctime'] ] )
    except Exception as e:
        print "Cannot insert file %s into DB" %(filehash)
        print repr(e)
        return False
    print "Successfully INSERTed. Affected: %i" %(affected)
    return c.lastrowid
def getExtension(filename):
    """Return the lowercased file extension of *filename*, without the dot.

    Bug fixed: for names without any dot, str.rfind returns -1 and the
    original returned the entire lowercased name; a dotless name now
    yields '' instead.
    """
    dotPos = filename.rfind('.')
    if dotPos < 0:
        return ''
    return filename[dotPos + 1:].lower()
def getMetadataForFiles(files, scannersOnly = False):
    """Retrieve stored metadata for a dict of files.

    *files* maps filename -> extraInfo dict; only entries carrying an
    'id' key are looked up.  When *scannersOnly* is True only distinct
    (file_id, scanner) pairs are fetched (enough to know which plugins
    already ran); otherwise full metadata rows are returned.

    Returns {file_id: {scanner: {column: value}}}; an empty dict when
    no entry has a DB id.  Uses the module-level cursor ``c``.
    """
    filesById = [extraInfo['id'] for extraInfo in files.values() if 'id' in extraInfo]
    if not filesById:
        # avoid executing "... IN ()" which is invalid SQL
        return {}
    # Bug fixed: placeholders must match len(filesById), not len(files) --
    # entries without an 'id' made the counts diverge in the original.
    placeholders = ', '.join(['%s'] * len(filesById))
    if scannersOnly:
        sql = 'SELECT DISTINCT file_id, scanner FROM metadata WHERE file_id IN (%s) GROUP BY file_id, scanner' %(placeholders)
    else:
        sql = 'SELECT * FROM metadata WHERE file_id IN (%s)' %(placeholders)
    c.execute( sql, filesById )
    rows = c.fetchall()
    metadata = {}
    for row in rows:
        fileId = row['file_id']
        if fileId not in metadata:
            metadata[fileId] = {}
        # NOTE(review): resets the scanner dict on every row, so only the
        # last row per (file, scanner) survives - preserved from original
        metadata[fileId][row['scanner']] = {}
        for k, v in row.items():
            metadata[fileId][row['scanner']][k] = v
    return metadata
def getFileIDByHash(filehash):
    """Resolve a content hash to its files.id.

    Consults the in-memory ``known`` dict first, then falls back to a DB
    lookup via the module-level cursor ``c``.  Returns False when the
    hash cannot be resolved.
    """
    if filehash in known:
        if 'id' in known[filehash]:
            return known[filehash]['id']
    numrows = c.execute('SELECT id FROM files WHERE sha1=%s', [filehash])
    if numrows == 1:
        # Bug fixed: the connection uses DictCursor, so fetchone() returns
        # a dict; the original indexed [0], which raises KeyError.
        return c.fetchone()['id']
    return False
def getMetadataFromDB(file_id, scanner = 'all' ):
    """Fetch stored metadata rows for one file.

    Returns {scanner: {tagname: tagvalue}} (possibly empty), or False
    when *file_id* is falsy.  With scanner='all' every scanner's rows
    are returned, otherwise only the named scanner's.  Uses the
    module-level cursor ``c``.
    """
    if not file_id:
        return False
    # metadata table layout:
    # id | file_id | scanner | tagname | tagvalue
    # Bug fixed: the original compared ``scanner is 'all'`` - identity,
    # not equality - which only worked through CPython string interning.
    if scanner == 'all':
        numrows = c.execute("SELECT * FROM metadata WHERE file_id=%s", [file_id])
    else:
        numrows = c.execute("SELECT * FROM metadata WHERE file_id=%s AND scanner=%s", [file_id, scanner])
    #print "getMeta fetched %i rows" %(numrows)
    result = c.fetchall()
    metadata = {}
    for row in result:
        if row['scanner'] not in metadata:
            metadata[row['scanner']] = {}
        metadata[row['scanner']][row['tagname']] = row['tagvalue']
    return metadata
def compareMetadata(old, new):
    """Diff two scanner -> {tagname: value} mappings.

    Returns [deleted, added]: tags present in *old* but missing from
    *new* land in deleted, tags new to *new* land in added, and tags
    whose stringified values differ appear in both (old value under
    deleted, new value under added).
    """
    deleted = {}
    added = {}

    def _mark(store, scanner, tagname, value):
        # record one changed tag, creating the scanner bucket on demand
        store.setdefault(scanner, {})[tagname] = value

    for scanner, tags in old.items():
        if scanner not in new:
            deleted[scanner] = tags
            continue
        for tagname, value in tags.items():
            if tagname not in new[scanner]:
                _mark(deleted, scanner, tagname, value)
            elif str(value) != str(new[scanner][tagname]):
                print("value of tag %s differs: %s vs %s"
                      % (tagname, repr(value), repr(new[scanner][tagname])))
                _mark(deleted, scanner, tagname, value)
                _mark(added, scanner, tagname, new[scanner][tagname])

    for scanner, tags in new.items():
        if scanner not in old:
            added[scanner] = tags
            continue
        for tagname, value in tags.items():
            if tagname not in old[scanner]:
                _mark(added, scanner, tagname, value)

    return [ deleted, added ]
def makeString(indict):
    """Coerce every value of *indict* to str, in place.

    Mutates *indict* and also returns it so the call can be used inline
    (as putMetadataIntoDB does).

    Changed .iteritems() to .items(): replacing values of existing keys
    is safe while iterating, and .items() works on Python 2 and 3 alike
    (the rest of this script is still Python 2).
    """
    for key, value in indict.items():
        indict[key] = str(value)
    return indict
def putMetadataIntoDB(scanner, filehash, metaDict):
    """Replace the stored metadata of one scanner for one file.

    Diffs the new tag dict against what is in the DB (for logging),
    deletes the scanner's old rows, and bulk-inserts the new ones with
    derived float and datetime columns for sortable tag values.
    Uses module globals ``c`` and ``db``; commits at the end.
    """
    print "Put metadata from scanner %s for filehash %s into DB" %(scanner, filehash)
    file_id = getFileIDByHash(filehash)
    oldData = getMetadataFromDB(file_id, scanner=scanner)
    #print oldData
    if not oldData: oldData = { scanner: {} }
    newData = { scanner: makeString(metaDict) }
    deleted, added = compareMetadata(oldData, newData)
    #print "diff:"
    #print deleted
    #print "--"
    #print added
    #print "++"
    #print "***"
    deletedRows = c.execute('DELETE FROM metadata WHERE file_id=%s and scanner=%s', [file_id, scanner])
    # One "(file_id, 'scanner', %s, %s, %s, %s)" group per tag.
    # NOTE(review): file_id and scanner are string-interpolated into the
    # SQL, not bound - acceptable only because scanner names come from
    # the local plugin registry; verify file_id is always an int.
    placeholders = ', '.join(["(%s, '%s', %%s, %%s, %%s, %%s)" %(file_id, scanner)] * len(newData[scanner]))
    sql = 'INSERT INTO metadata (file_id, scanner, tagname, tagvalue, tagvalue_float, tagvalue_date) VALUES %s' %(placeholders)
    #print sql
    #print hash_lookup.keys()
    sqlarray = []
    for tagname, tagvalue in newData[scanner].iteritems():
        sqlarray.append(tagname)
        sqlarray.append(tagvalue)
        # numeric shadow column for range queries/sorting
        try:
            valFloat = float(tagvalue)
        except ValueError:
            valFloat = None
        sqlarray.append(valFloat)
        valDate = None
        # date shadow column: only attempted for tags that look temporal
        if 'date' in tagname.lower() or 'time' in tagname.lower():
            try:
                # Example EXIF-ish formats this must cope with:
                # 2015:08:22 19:09:58.241
                # 2015:09:14
                # 2015:08:22 19:09:58.241
                # 2015:08:22 19:09:58+02:00
                # 2015:08:22 19:09:58
                # 2015:08:22 19:09:58.241
                # 17:09:56.52
                # 2015:08:22 17:09:56.52Z
                # 2015:08:22
                m = re.search('^((19|20|21)[0-9][0-9])[-:._]((0[1-9]|1[0-2]))[-:._]([0-3][0-9])(.*)', tagvalue )
                if m:
                    # full date (optionally followed by a time remainder)
                    valDate = "%s-%s-%s %s" %(m.group(1), m.group(3), m.group(5), m.group(6))
                    print "Matched %s in %s => %s" %(tagvalue, tagname, valDate)
                else:
                    # time-only value: anchor it on the epoch date
                    m = re.search('^([01][0-9]|2[0-3])[-:._]([0-5][0-9])[-:._]([0-5][0-9])(\.[0-9]+)?', tagvalue )
                    if m:
                        valDate = "1970-01-01 %s:%s:%s" %(m.group(1), m.group(2), m.group(3))
                        if m.group(4):
                            valDate = "%s%s" %(valDate, m.group(4))
                        print "Matched %s in %s => %s" %(tagvalue, tagname, valDate)
                    #else:
                        #print "Could not match %s in %s" %(tagvalue, tagname)
            except ValueError:
                valDate = None
        sqlarray.append(valDate)
    try:
        numrows = c.execute( sql, sqlarray )
    except Exception as e:
        print "error on INSERT metadata"
        print repr(e)
    else:
        print "<7> %i rows INSERTed for scanner %s on file %s" %(numrows, scanner, file_id)
    db.commit()
def getExtension(filename):
    """Return the lowercased file extension of *filename*, without the dot.

    NOTE: this is a verbatim duplicate of an earlier definition in this
    file; being later, it is the one in effect at runtime.

    Bug fixed: for names without any dot, str.rfind returns -1 and the
    original returned the entire lowercased name; a dotless name now
    yields '' instead.
    """
    dotPos = filename.rfind('.')
    if dotPos < 0:
        return ''
    return filename[dotPos + 1:].lower()
def safelyImportFileIntoRepo ( filehash, extraInfo ):
extension = getExtension(extraInfo['name'])
targetFilename = '%s/%s/%s.%s' %(repoDir, makePathFromHash(filehash), filehash, extension)
print "<7> safely import %s to %s" %(extraInfo['name'], targetFilename)
try:
dirExists = os.stat(os.path.dirname(targetFilename))
except Exception as e:
if e.errno == 2:
# No such file or directory
try:
os.makedirs(os.path.dirname(targetFilename))
except Exception as e:
print "<4> Could not create repo directory: %s" %(os.path.dirname(targetFilename))
print repr(e)
return False
else:
print repr(e)
return False
if os.path.exists(targetFilename):
# file already exists in repo
destHash = hash_file(targetFilename)
if destHash != filehash:
print "<4> Hash collision - a file with the same hash %s already exists in the repo - this should never happen" %(destHash)
return False
else:
# file in repo is the same we want to import so don't do anything
print "<7> %s already exists in the repo, doing nothing" %(filehash)
return True
# only if target does not exist yet:
try:
shutil.copy2(extraInfo['name'], targetFilename) # copy2 preserves mtime/atime
except Exception as e:
print "<5> Could not copy '%s' to '%s'" %(filename, targetFilename)
print repr(e)
return False
destHash = hash_file(targetFilename)
if destHash != filehash:
print "<5> Newly copied file has non-matching hash: original = '%s', copy = '%s'" %(filehash, destHash)
return False
else:
print "<7> Successfully imported %s into the repo" %(filehash)
return True
# Establish the MySQL connection; DictCursor makes every fetched row a
# column-name -> value dict (the rest of the script relies on this).
if not db_credentials:
    print "No database credentials, cannot run."
    sys.exit(1)
try:
    db = MySQLdb.connect(user=db_credentials['db_username'], passwd=db_credentials['db_password'], db=db_credentials['db_name'], cursorclass=MySQLdb.cursors.DictCursor)
except Exception as e:
    print "Could not connect to SQL Server"
    print repr(e)
    sys.exit(2)
try:
    c = db.cursor()
except Exception as e:
    print "Could not acquire a DB cursor"
    print repr(e)
    sys.exit(3)
# Walk the source tree and bucket every file by detected MIME type.
filesByMimetype = {}
debugcount = 0
for (dirpath, dirnames, filenames) in os.walk(args.sourcedir, topdown=True, onerror=None, followlinks=False):
    if filenames:
        print "Working on directory %s" %(dirpath)
    for filename in filenames:
        fullfilename = '%s/%s' %(dirpath, filename)
        try:
            mimetype = getMimeType(fullfilename)
        except Exception as e:
            print "Could not detect MIME type for %s" %(fullfilename)
            mimetype = None
            continue
        if mimetype not in filesByMimetype:
            filesByMimetype[mimetype] = []
        filesByMimetype[mimetype].append(fullfilename)
        debugcount += 1
    # NOTE(review): leftover debug limiter - caps the whole run at ~33
    # files; remove before production use
    if debugcount > 32:
        print "*** DEBUG: breaking after %i files ***" %(debugcount)
        break
# Main pipeline, one pass per MIME type that has at least one registered
# scanner: hash/register files in the DB, optionally copy them into the
# repository (and delete originals), then run each metadata scanner on
# the files that still lack its data.
for mimetype in filesByMimetype:
    if mimetype in scannersByMimetype:
        # supported file (we have at least one scanner that can give us metadata), so hash it...
        filesBasicInfo = gatherBasicInfo(filesByMimetype[mimetype])
        # check whether we have data already in SQL; figure out whether we need to import & delete... etc.
        notKnown, known = getRepoStateForFiles ( filesBasicInfo )
        hashByFilename = {}
        # Register brand-new files and promote them into ``known``.
        for filehash, extraInfo in notKnown.iteritems():
            # extraInfo is hash + ctime etc
            print "unknown %s file: %s, info: %s" %(mimetype, filehash, extraInfo)
            fileId = addFileIntoDB(filehash, mimetype, extraInfo)
            if fileId:
                # hmmm. When to commit the DB? After every file, or at some other point?
                try:
                    db.commit()
                except Exception as e:
                    print "Could not commit DB changes."
                    print repr(e)
                else:
                    extraInfo['id'] = fileId
                    known[filehash] = extraInfo
                    hashByFilename[extraInfo['name']] = filehash
        # Copy into the repo and/or delete originals, as requested.
        for filehash, extraInfo in known.iteritems():
            # extraInfo is hash, ctime, db_id and the "lives_in_repo" field.
            print "known file: %s, info: %s" %(filehash, extraInfo)
            if args.copy_to_repo and not extraInfo['is_in_repo']:
                try:
                    importedIntoRepo = safelyImportFileIntoRepo(filehash, extraInfo)
                except Exception as e:
                    print repr(e)
                    print "Could not import file %s(%s) into repo" %(filehash, extraInfo['name'])
                else:
                    if not importedIntoRepo:
                        print "Could not import file %s(%s) into repo" %(filehash, extraInfo['name'])
                    else:
                        try:
                            affected_rows = c.execute('UPDATE files SET is_in_repo=True WHERE id=%s', [extraInfo['id']])
                        except:
                            print "Could not update DB status for file %s (id %s)" %(filehash, extraInfo['id'])
                        else:
                            print "%i rows updated for file %i" %(affected_rows, extraInfo['id'])
                            extraInfo['is_in_repo'] = True
                            known[filehash]['is_in_repo'] = True
                            db.commit()
            if args.delete_original and extraInfo['is_in_repo']:
                extension = getExtension(extraInfo['name'])
                targetFilename = '%s/%s/%s.%s' %(repoDir, makePathFromHash(filehash), filehash, extension)
                # only delete the original after re-verifying the repo copy's hash
                if os.path.exists(targetFilename) and hash_file(targetFilename) == filehash:
                    print "<6> We have a valid copy of %s in the repo, going to delete %s" %(filehash, extraInfo['name'])
                    try:
                        os.unlink(extraInfo['name'])
                    except Exception as e:
                        print "Could not delete original %s" %(extraInfo['name'])
                        print repr(e)
                    else:
                        print "<6> Successfully deleted original of %s (%s)" %(filehash, extraInfo['name'])
                else:
                    # DB says in-repo but the file is missing/corrupt: re-import
                    print "<4> A file that we think is in the repo does not exist - NOT deleting original: %s" %(filehash)
                    try:
                        importedIntoRepo = safelyImportFileIntoRepo(filehash, extraInfo)
                    except Exception as e:
                        print repr(e)
                        print "Could not import file %s(%s) into repo" %(filehash, extraInfo['name'])
                    else:
                        if not importedIntoRepo:
                            print "Could not import file %s(%s) into repo" %(filehash, extraInfo['name'])
                        else:
                            print "<5> Re-imported file %s into the repo" %(filehash)
        #print "not found in Repo: %s" %("\n".join(notKnown))
        #print "already in Repo: %s" %("\n".join(known))
        # Which scanners already have rows for which file ids?
        knownMetaData = getMetadataForFiles(files = known, scannersOnly = True)
        print "=================="
        print "knownMetaData:"
        print knownMetaData
        print "=================="
        hashById = {}
        for k, v in known.iteritems():
            #print "hbF: %s = %s" %(k, v)
            if v['name'] not in hashByFilename:
                hashByFilename[v['name']] = k
                if v['id'] not in hashById:
                    hashById[v['id']] = k
            else:
                print "Duplicate filename %s?! This should not happen" %(v['name'])
        #print "hbF:"
        #print hashByFilename
        #print "**"
        # iterate over registered metadata scanners for the current mimetype
        for plugin in scannersByMimetype[mimetype]:
            begin = time.time()
            list_of_files_to_scan = []
            for filename in filesByMimetype[mimetype]:
                filehash = None
                if filename in hashByFilename:
                    filehash = hashByFilename[filename]
                    if filehash in known:
                        if 'id' in known[filehash]:
                            fileId = known[filehash]['id']
                            if fileId in knownMetaData:
                                fmd = knownMetaData[fileId]
                                if plugin in fmd:
                                    print "Not scanning file %s with scanner %s, already have data in DB" %(filename, plugin)
                                    continue
                if filehash and filehash in known:
                    if known[filehash]['is_in_repo']:
                        # NOTE(review): ``extraInfo`` here is stale - it is the
                        # leftover loop variable from the ``known`` loop above,
                        # not this file's entry; verify intended name source
                        extension = getExtension(extraInfo['name'])
                        targetFilename = '%s/%s/%s.%s' %(repoDir, makePathFromHash(filehash), filehash, extension)
                        list_of_files_to_scan.append(targetFilename)
                    else:
                        list_of_files_to_scan.append(filename)
            print "list of files to scan with %s: %s" %(plugin, list_of_files_to_scan)
            if list_of_files_to_scan:
                metadata = regScan[plugin].scanBulk(list_of_files_to_scan)
                finish = time.time()
                time_taken = finish - begin
                if time_taken <= 0:
                    files_per_second = -1 # avoid division by zero
                else:
                    files_per_second = len(filesByMimetype[mimetype]) / float(time_taken)
                print "plugin %s took %0.2f seconds to parse %i files (%0.1f files per second)" %(plugin, time_taken, len(filesByMimetype[mimetype]), files_per_second)
            else:
                metadata = False
            if metadata:
                for filename, metaDict in metadata.iteritems():
                    if filename in hashByFilename:
                        filehash = hashByFilename[filename]
                    elif filename.startswith(repoDir):
                        # Repo files are named <hash>.<ext>.
                        # NOTE(review): the [1:...] slice drops the first hash
                        # character; [0:rfind] looks intended - confirm
                        filehash = os.path.basename(filename)[1:os.path.basename(filename).rfind('.')]
                    else:
                        print "file %s - no hash found, skip" %(filename)
                        continue
                    try:
                        putMetadataIntoDB(plugin, filehash, metaDict)
                    except Exception as e:
                        print "Could not put metadata into DB"
                        print repr(e)
                    else:
                        print "<7> successfully updated metadata for %s" %(filename)
                    print "%s: %s" %(filename, metaDict)
    else:
        if args.verbose:
            print "There is no plugin to handle mimetype %s." %(mimetype)
            print filesByMimetype[mimetype]
print "--"
|
gpl-3.0
| -1,959,386,333,837,326,000
| 41.127031
| 281
| 0.552735
| false
| 3.956715
| false
| false
| false
|
TheProjecter/kassie
|
exemples/chat.py
|
1
|
3905
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Cet exemple met en place un système de chat minimaliste gérant :
- les connexions multiples
- les déconnexions fortuites
- la réception ou l'envoie de messages aux clients
On utilise les fonctions de callback pour paramétrer comment doit
réagir le serveur lors d'une connexion, d'une déconnexion ou d'une
réception d'un message. Consultez le code pour plus d'informations.
"""
import os
import sys
sys.path.append(os.getcwd() + "/../src")
from reseau.connexions.serveur import *
fin_ligne = "\r\n"
# Fonctions de callback
def connexion(serveur, client):
    """Callback: a new *client* connected; announce it to every other client."""
    print("Connexion du client {0}".format(client))
    for c in serveur.clients.values():
        if c is not client:
            c.envoyer("$$ {0} se connecte au serveur{1}".format( \
                client, fin_ligne).encode())
def deconnexion(serveur, client):
    """Callback: *client* disconnected; notify the remaining clients."""
    print("Déconnexion du client {0} : {1}".format(client, client.retour))
    for c in serveur.clients.values():
        if c is not client:
            c.envoyer("** {0} se déconnecte du serveur{1}".format( \
                client, fin_ligne).encode())
def reception(serveur, client):
    """Callback: *client* sent a message; broadcast it to all clients (sender included)."""
    msg = client.get_message() # msg is of type bytes, not str
    print("J'ai réceptionné en bytes {0}".format(msg))
    for c in serveur.clients.values():
        c.envoyer("<{0}> {1}{2}".format(client.id, msg, fin_ligne).encode())
# Create and configure the server
serveur = ConnexionServeur(4000) # listening on port 4000 for this demo
# Wire up the callbacks
# callback on connection
serveur.callbacks["connexion"].fonction = connexion
serveur.callbacks["connexion"].parametres = (serveur,)
# callback on disconnection
serveur.callbacks["deconnexion"].fonction = deconnexion
serveur.callbacks["deconnexion"].parametres = (serveur,)
# callback on message reception
serveur.callbacks["reception"].fonction = reception
serveur.callbacks["reception"].parametres = (serveur,)
# End of server configuration
serveur.init() # initialisation, mandatory
while True: # the server never stops by itself
    serveur.verifier_connexions()
    serveur.verifier_receptions()
|
bsd-3-clause
| -4,216,159,609,555,280,000
| 38.242424
| 79
| 0.732304
| false
| 3.493705
| false
| false
| false
|
IsCoolEntertainment/debpkg_python-boto
|
boto/route53/connection.py
|
1
|
16973
|
# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011 Blue Pines Technologies LLC, Brad Carleton
# www.bluepines.org
# Copyright (c) 2012 42 Lines Inc., Jim Browne
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import exception
import random
import urllib
import uuid
import xml.sax
import boto
from boto.connection import AWSAuthConnection
from boto import handler
import boto.jsonresponse
from boto.route53.record import ResourceRecordSets
from boto.route53.zone import Zone
HZXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
<Name>%(name)s</Name>
<CallerReference>%(caller_ref)s</CallerReference>
<HostedZoneConfig>
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
#boto.set_stream_logger('dns')
class Route53Connection(AWSAuthConnection):
DefaultHost = 'route53.amazonaws.com'
"""The default Route53 API endpoint to connect to."""
Version = '2013-04-01'
"""Route53 API version."""
XMLNameSpace = 'https://route53.amazonaws.com/doc/2013-04-01/'
"""XML schema for this Route53 API version."""
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 port=None, proxy=None, proxy_port=None,
                 host=DefaultHost, debug=0, security_token=None,
                 validate_certs=True, https_connection_factory=None,
                 profile_name=None):
        """Initialize the Route53 connection.

        All parameters are forwarded to AWSAuthConnection; the literal
        True argument enables HTTPS (the ``is_secure`` positional).
        """
        super(Route53Connection, self).__init__(host,
                                                aws_access_key_id, aws_secret_access_key,
                                                True, port, proxy, proxy_port, debug=debug,
                                                security_token=security_token,
                                                validate_certs=validate_certs,
                                                https_connection_factory=https_connection_factory,
                                                profile_name=profile_name)
    def _required_auth_capability(self):
        """Tell AWSAuthConnection to sign requests with the Route53 auth scheme."""
        return ['route53']
    def make_request(self, action, path, headers=None, data='', params=None):
        """Issue one HTTP request against the Route53 endpoint.

        *params* (dict) entries with a None value are dropped; the rest
        are URL-encoded and appended to *path* as a query string before
        delegating to AWSAuthConnection.make_request with this class's
        retry handler.
        """
        if params:
            pairs = []
            for key, val in params.iteritems():
                if val is None:
                    continue
                pairs.append(key + '=' + urllib.quote(str(val)))
            path += '?' + '&'.join(pairs)
        return super(Route53Connection, self).make_request(action, path,
                                                           headers, data,
                                                           retry_handler=self._retry_handler)
# Hosted Zones
    def get_all_hosted_zones(self, start_marker=None, zone_list=None):
        """
        Returns a Python data structure with information about all
        Hosted Zones defined for the AWS account.

        :param int start_marker: start marker to pass when fetching additional
            results after a truncated list
        :param list zone_list: a HostedZones list to prepend to results

        Raises exception.DNSServerError for any HTTP status >= 300.
        """
        params = {}
        if start_marker:
            params = {'marker': start_marker}
        response = self.make_request('GET', '/%s/hostedzone' % self.Version,
                                     params=params)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element(list_marker='HostedZones',
                                      item_marker=('HostedZone',))
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        if zone_list:
            e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
        # Truncated response: recurse with the marker, carrying the zones
        # collected so far; the recursive result replaces ``e``.
        while 'NextMarker' in e['ListHostedZonesResponse']:
            next_marker = e['ListHostedZonesResponse']['NextMarker']
            zone_list = e['ListHostedZonesResponse']['HostedZones']
            e = self.get_all_hosted_zones(next_marker, zone_list)
        return e
    def get_hosted_zone(self, hosted_zone_id):
        """
        Get detailed information about a particular Hosted Zone.

        :type hosted_zone_id: str
        :param hosted_zone_id: The unique identifier for the Hosted Zone

        Raises exception.DNSServerError for any HTTP status >= 300.
        """
        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
        response = self.make_request('GET', uri)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element(list_marker='NameServers',
                                      item_marker=('NameServer',))
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
def get_hosted_zone_by_name(self, hosted_zone_name):
    """
    Find a Hosted Zone by its fully qualified domain name and return the
    same detailed structure as get_hosted_zone, or None if no zone in the
    account matches.

    :type hosted_zone_name: str
    :param hosted_zone_name: The fully qualified domain name for the Hosted
        Zone
    """
    # Route53 stores zone names with a trailing dot; normalise the input.
    if hosted_zone_name[-1] != '.':
        hosted_zone_name += '.'
    listing = self.get_all_hosted_zones()
    for zone in listing['ListHostedZonesResponse']['HostedZones']:
        if zone['Name'] == hosted_zone_name:
            # The Id field looks like '/hostedzone/XYZ'; keep the last part.
            return self.get_hosted_zone(zone['Id'].split('/')[-1])
def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
    """
    Create a new Hosted Zone.  Returns a Python data structure with
    information about the newly created Hosted Zone.

    :type domain_name: str
    :param domain_name: The name of the domain. This should be a
        fully-specified domain, and should end with a final period
        as the last label indication.  If you omit the final period,
        Amazon Route 53 assumes the domain is relative to the root.
        This is the name you have registered with your DNS registrar.
        It is also the name you will delegate from your registrar to
        the Amazon Route 53 delegation servers returned in
        response to this request.

    :type caller_ref: str
    :param caller_ref: A unique string that identifies the request
        and that allows failed CreateHostedZone requests to be retried
        without the risk of executing the operation twice.  If you don't
        provide a value for this, boto will generate a Type 4 UUID and
        use that.

    :type comment: str
    :param comment: Any comments you want to include about the hosted
        zone.
    """
    # Generate an idempotency token when the caller didn't supply one.
    if caller_ref is None:
        caller_ref = str(uuid.uuid4())
    params = {'name': domain_name,
              'caller_ref': caller_ref,
              'comment': comment,
              'xmlns': self.XMLNameSpace}
    # HZXML is the CreateHostedZone request template defined at module level.
    xml_body = HZXML % params
    uri = '/%s/hostedzone' % self.Version
    response = self.make_request('POST', uri,
                                 {'Content-Type': 'text/xml'}, xml_body)
    body = response.read()
    boto.log.debug(body)
    # 201 Created is the only success status for this call.
    if response.status == 201:
        e = boto.jsonresponse.Element(list_marker='NameServers',
                                      item_marker=('NameServer',))
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    else:
        raise exception.DNSServerError(response.status,
                                       response.reason,
                                       body)
def delete_hosted_zone(self, hosted_zone_id):
    """Delete the Hosted Zone with the given id and return the parsed
    change-info response from Route53."""
    response = self.make_request(
        'DELETE', '/%s/hostedzone/%s' % (self.Version, hosted_zone_id))
    payload = response.read()
    boto.log.debug(payload)
    # Route53 answers 200 (with a body) or 204 (no content) on success.
    if response.status not in (200, 204):
        raise exception.DNSServerError(response.status, response.reason,
                                       payload)
    element = boto.jsonresponse.Element()
    parser = boto.jsonresponse.XmlHandler(element, None)
    parser.parse(payload)
    return element
# Resource Record Sets
def get_all_rrsets(self, hosted_zone_id, type=None,
                   name=None, identifier=None, maxitems=None):
    """
    Retrieve the Resource Record Sets defined for this Hosted Zone,
    parsed into a ResourceRecordSets object.

    :type hosted_zone_id: str
    :param hosted_zone_id: The unique identifier for the Hosted Zone

    :type type: str
    :param type: The record type to begin the listing from.  Valid
        choices: A, AAAA, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT.
        For weighted resource record sets: A, AAAA, CNAME, TXT.
        For zone apex aliases: A, AAAA.

    :type name: str
    :param name: The first name in the lexicographic ordering of domain
        names to be retrieved

    :type identifier: str
    :param identifier: In a hosted zone with weighted resource record
        sets (same DNS name and type, differentiated only by
        SetIdentifier), the SetIdentifier of the next resource record
        set to resume a truncated listing from

    :type maxitems: int
    :param maxitems: The maximum number of records
    """
    # make_request silently drops parameters whose value is None.
    query = {'type': type, 'name': name,
             'Identifier': identifier, 'maxitems': maxitems}
    response = self.make_request(
        'GET', '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id),
        params=query)
    payload = response.read()
    boto.log.debug(payload)
    if response.status >= 300:
        raise exception.DNSServerError(response.status, response.reason,
                                       payload)
    record_sets = ResourceRecordSets(connection=self,
                                     hosted_zone_id=hosted_zone_id)
    sax_handler = handler.XmlHandler(record_sets, self)
    xml.sax.parseString(payload, sax_handler)
    return record_sets
def change_rrsets(self, hosted_zone_id, xml_body):
    """
    Create or change the authoritative DNS information for this Hosted
    Zone.  Returns a Python data structure describing the submitted
    change set, including the Change ID.

    :type hosted_zone_id: str
    :param hosted_zone_id: The unique identifier for the Hosted Zone

    :type xml_body: str
    :param xml_body: The list of changes to be made, defined in the
        XML schema defined by the Route53 service.
    """
    response = self.make_request(
        'POST', '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id),
        {'Content-Type': 'text/xml'}, xml_body)
    payload = response.read()
    boto.log.debug(payload)
    if response.status >= 300:
        raise exception.DNSServerError(response.status, response.reason,
                                       payload)
    element = boto.jsonresponse.Element()
    parser = boto.jsonresponse.XmlHandler(element, None)
    parser.parse(payload)
    return element
def get_change(self, change_id):
    """
    Look up a proposed set of changes, as submitted by the
    change_rrsets method, and return its status information.

    :type change_id: str
    :param change_id: The unique identifier for the set of changes.
        This ID is returned in the response to the change_rrsets method.
    """
    response = self.make_request(
        'GET', '/%s/change/%s' % (self.Version, change_id))
    payload = response.read()
    boto.log.debug(payload)
    if response.status >= 300:
        raise exception.DNSServerError(response.status, response.reason,
                                       payload)
    element = boto.jsonresponse.Element()
    parser = boto.jsonresponse.XmlHandler(element, None)
    parser.parse(payload)
    return element
def create_zone(self, name):
    """
    Create a new Hosted Zone and return it wrapped in a Zone object.

    :type name: str
    :param name: The name of the domain. This should be a
        fully-specified domain, and should end with a final period
        as the last label indication.  If you omit the final period,
        Amazon Route 53 assumes the domain is relative to the root.
        This is the name you have registered with your DNS registrar.
        It is also the name you will delegate from your registrar to
        the Amazon Route 53 delegation servers returned in
        response to this request.
    """
    created = self.create_hosted_zone(name)
    return Zone(self, created['CreateHostedZoneResponse']['HostedZone'])
def get_zone(self, name):
    """
    Return the Zone object for the Hosted Zone whose (fully qualified)
    name matches *name*, or None when the account has no such zone.

    :param name: The name of the domain. This should be a
        fully-specified domain, and should end with a final period
        as the last label indication.
    """
    wanted = self._make_qualified(name)
    for candidate in self.get_zones():
        if candidate.name == wanted:
            return candidate
def get_zones(self):
    """
    Return a list with one Zone object per Hosted Zone defined for
    the AWS account.
    """
    listing = self.get_all_hosted_zones()
    hosted = listing['ListHostedZonesResponse']['HostedZones']
    return [Zone(self, info) for info in hosted]
def _make_qualified(self, value):
    """
    Ensure passed domain names end in a period (.) character.
    This will usually make a domain fully qualified.

    :param value: a single domain name, or a list/tuple/set of them
    :returns: the qualified (stripped) name, or a list of qualified
        names when a collection was passed in
    """
    # isinstance (rather than the original type(...) in [...]) also
    # accepts subclasses of list/tuple/set.
    if isinstance(value, (list, tuple, set)):
        # Qualify each member; falsy entries (e.g. '') pass through as-is.
        return [record if not record or record[-1] == '.'
                else "%s." % record
                for record in value]
    value = value.strip()
    if value and not value[-1] == '.':
        value = "%s." % value
    return value
def _retry_handler(self, response, i, next_sleep):
    """Decide whether a failed request should be retried.

    Returns None to fall back to the default retry behaviour, or a
    (msg, next_attempt_index, sleep_seconds) tuple to force a retry
    with full-jitter exponential backoff.
    """
    status = None
    boto.log.debug("Saw HTTP status: %s" % response.status)
    if response.status == 400:
        code = response.getheader('Code')
        if code and 'PriorRequestNotComplete' in code:
            # This is a case where we need to ignore a 400 error, as
            # Route53 returns this. See
            # http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html
            msg = "%s, retry attempt %s" % (
                'PriorRequestNotComplete',
                i
            )
            # Full-jitter backoff: sleep a random fraction of 2**i seconds.
            next_sleep = random.random() * (2 ** i)
            i += 1
            status = (msg, i, next_sleep)
    return status
|
mit
| 573,871,493,404,455,400
| 38.38051
| 94
| 0.567548
| false
| 4.49735
| false
| false
| false
|
CountZer0/PipelineConstructionSet
|
python/common/core/globalVariables.py
|
1
|
1227
|
'''
Author: Jason Parks
Created: Apr 22, 2012
Module: common.core.globalVariables
Purpose: to import globalVariables
'''
# Location of Toolset (absolute path to the PipelineConstructionSet checkout)
#toolsLocation = '/Users/jasonparks/Documents/workspace/PipelineConstructionSet'
toolsLocation = 'C:/Users/jason/git/PipelineConstructionSet'
# NOTE!: It is necessary to manually add the above location's
# python directory, i.e-
#
# PYTHONPATH = 'C:/Users/jason/git/PipelineConstructionSet/python'
#
# to the PYTHONPATH environment variable on all user's
# machines whom want to use Pipeline Construction set

# Log output directory; optionally set to a space on the network for
# easy T.A. access
logLocation = 'C:/temp/pipelineConstructionSet'
# Name your games here:
teamA = 'GreatGameA'
teamB = 'GreatGameB'
teamC = 'GreatGameC'
teamD = 'GreatGameD'
# You need to change the name of the file
# ./PipelineConstructionSet/schemas/GreatGameA.xml
# and the xml header info in the file as well
# If you are making tools for more than one team,
# you'll need to make more GreatGame*.xml files

# Build stamp (MMDDYY-HH.MM); manually update to date/time of release
build = '103012-20.33'
# This will show up in the PythonEditor or ScriptEditor
# when our DCC app first launches the toolMenu.
print "common.core.globalVariables imported"
|
bsd-3-clause
| 4,143,952,701,070,569,500
| 27.55814
| 80
| 0.764466
| false
| 3.361644
| false
| false
| false
|
veter-team/mechspeak
|
src/imshow.py
|
1
|
1438
|
# MQTT camera viewer (Python 2 / PIL / Tkinter): subscribes to a camera
# topic on a broker and displays each received image frame in a window.
import os, sys
import io
import Tkinter
import Image, ImageTk
from time import sleep
import cStringIO
import paho.mqtt.client as mqtt
host = "test.mosquitto.org"
#host = "localhost"
qos = 0
sensors_topic = "/rtmsg/d25638bb-17c2-46ac-b26e-ce1f67268088/sensors/"
camera_topic = sensors_topic + "camera1"
imgcnt = 0  # number of frames displayed so far; shown in the title bar
def on_message(client, userdata, message):
    # paho callback invoked for every publish on the subscribed topic;
    # the payload is expected to be one complete encoded image.
    global imgcnt
    global old_label_image
    global root
    try:
        image1 = Image.open(cStringIO.StringIO(message.payload))
        # Resize the window to the incoming frame and show it in a Label.
        root.geometry('%dx%d' % (image1.size[0],image1.size[1]))
        tkpi = ImageTk.PhotoImage(image1)
        label_image = Tkinter.Label(root, image=tkpi)
        label_image.place(x=0,y=0,width=image1.size[0],height=image1.size[1])
        imgcnt += 1
        root.title(str(imgcnt))
        # Swap labels after the new one is placed to avoid flicker.
        if old_label_image is not None:
            old_label_image.destroy()
        old_label_image = label_image
        root.update()  # process pending Tk events so the frame is painted
    except Exception, e:
        # This is used to skip anything not an image.
        # Image.open will generate an exception if it cannot open a file.
        print(e)
        mqttc.disconnect()
mqttc = mqtt.Client("zatoichi" + str(os.getpid()))
print('Connecting...')
mqttc.connect(host)
print('Connected')
mqttc.on_message = on_message
mqttc.subscribe(camera_topic)
root = Tkinter.Tk()
root.geometry('+%d+%d' % (128, 128))
old_label_image = None
mqttc.loop_forever()  # blocks forever, dispatching frames to on_message
|
mit
| -6,768,945,433,732,105,000
| 25.62963
| 77
| 0.666898
| false
| 3.126087
| false
| false
| false
|
hariseldon78/Teacup_gen3_customized
|
createTemperatureLookup.py
|
1
|
6248
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Creates a C code lookup table for doing ADC to temperature conversion
# on a microcontroller
# based on: http://hydraraptor.blogspot.com/2007/10/measuring-temperature-easy-way.html
# Modified Thu 10 Feb 2011 02:02:28 PM MST jgilmore for 5D_on_arduino firmware
# temps are now in 14.2 fixed point notation (i.e. measured in quarter-degrees)
# temps are not permitted to be negative (BUG:may result in numtemps fewer than requested)
# bugfix: --num-temps command line option works.
"""Thermistor Value Lookup Table Generator
Generates lookup to temperature values for use in a microcontroller in C format based on:
http://hydraraptor.blogspot.com/2007/10/measuring-temperature-easy-way.html
The main use is for Arduino programs that read data from the circuit board described here:
http://make.rrrf.org/ts-1.0
Usage: python createTemperatureLookup.py [options]
Options:
-h, --help show this help
--r0=... thermistor rating where # is the ohm rating of the thermistor at t0 (eg: 10K = 10000)
--t0=... thermistor temp rating where # is the temperature in Celsuis to get r0 (from your datasheet)
--beta=... thermistor beta rating. see http://reprap.org/bin/view/Main/MeasuringThermistorBeta
--r1=... R1 rating where # is the ohm rating of R1 (eg: 10K = 10000)
--r2=... R2 rating where # is the ohm rating of R2 (eg: 10K = 10000)
--num-temps=... the number of temperature points to calculate (default: 20)
--max-adc=... the max ADC reading to use. if you use R1, it limits the top value for the thermistor circuit, and thus the possible range of ADC values
It is suggested to generate more values than you need, and delete some of the ones in the ranges
that aren't interesting. This will improve accuracy in the temperature ranges that are important to you.
"""
from math import *
import sys
import getopt
class Thermistor:
    """Model an NTC thermistor in a potential divider and convert between
    10-bit ADC readings and temperatures in Celsius (beta-model)."""

    def __init__(self, r0, t0, beta, r1, r2):
        # Nominal resistance r0 is specified at temperature t0 (Kelvin).
        self.r0 = r0
        self.t0 = t0 + 273.15
        self.beta = beta
        self.vadc = 5.0  # ADC reference voltage
        self.vcc = 5.0   # supply voltage to the potential divider
        # Constant factor of the beta-model: R(T) = k * exp(beta / T).
        self.k = r0 * exp(-beta / self.t0)
        if r1 > 0:
            # Thevenin equivalent of the r1/r2 bias network.
            self.vs = r1 * self.vcc / (r1 + r2)
            self.rs = r1 * r2 / (r1 + r2)
        else:
            # No pull-up: the divider is just r2 to the supply.
            self.vs = self.vcc
            self.rs = r2

    def temp(self, adc):
        """Convert a 10-bit ADC reading into a temperature in Celsius."""
        volts = adc * self.vadc / 1024
        resistance = self.rs * volts / (self.vs - volts)
        return (self.beta / log(resistance / self.k)) - 273.15

    def setting(self, t):
        """Convert a temperature in Celsius into the matching ADC value."""
        resistance = self.r0 * exp(self.beta * (1 / (t + 273.15) - 1 / self.t0))
        volts = self.vs * resistance / (self.rs + resistance)
        return round(volts / self.vadc * 1024)
def main(argv):
    """Parse command-line options, build a Thermistor model and print a
    C lookup table (ADC reading -> temperature in quarter-degrees, 14.2
    fixed point) to stdout."""
    # Defaults for a common 10K NTC with a 680/1600 ohm bias network.
    r0 = 10000;
    t0 = 25;
    beta = 3947;
    r1 = 680;
    r2 = 1600;
    num_temps = int(20);
    max_adc = int(1023);
    try:
        opts, args = getopt.getopt(argv, "h", ["help", "r0=", "t0=", "beta=", "r1=", "r2=", "max-adc=", "num-temps="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt == "--r0":
            r0 = int(arg)
        elif opt == "--t0":
            t0 = int(arg)
        elif opt == "--beta":
            beta = int(arg)
        elif opt == "--r1":
            r1 = int(arg)
        elif opt == "--r2":
            r2 = int(arg)
        elif opt == "--max-adc":
            max_adc = int(arg)
        elif opt == "--num-temps":
            num_temps = int(arg)
    # Sample the ADC range evenly with num_temps points.
    increment = int(max_adc/(num_temps-1));
    t = Thermistor(r0, t0, beta, r1, r2)
    adcs = range(1, max_adc, increment);
#    adcs = [1, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100, 110, 130, 150, 190, 220, 250, 300]
    first = 1
    # Chop off negative temperatures (as we're using an unsigned 16-bit
    # value for temp).  NOTE(review): truncating here may leave fewer
    # than num_temps entries in the table.
    for i in range(0,len(adcs)):
        if int(t.temp(adcs[i])*4) < 0:
            adcs=adcs[0:i+1]
            # Replace this entry with the ADC reading for 0C
            adcs[i]=int(t.setting(0))
            # If the closest ADC reading to 0C is still negative, step
            # down to the next lower ADC reading (higher temperature).
            if int(t.temp(adcs[i])*4)<0:
                adcs[i] -=1
            break
    # Emit the table as a C source fragment on stdout.
    print "// Thermistor lookup table"
    print "// default thermistor lookup table"
    print "// You may be able to improve the accuracy of this table in various ways."
    print "//    1. Measure the actual resistance of the resistor. It's \"nominally\" 4.7K, but that's ± 5%."
    print "//    2. Measure the actual beta of your thermistor:http://reprap.org/wiki/MeasuringThermistorBeta"
    print "//    3. Generate more table entries than you need, then trim down the ones in uninteresting ranges."
    print "// In either case you'll have to regenerate this table, which requires python, which is difficult to install on windows."
    print "// Since you'll have to do some testing to determine the correct temperature for your application anyway, you"
    print "// may decide that the effort isn't worth it. Who cares if it's reporting the \"right\" temperature as long as it's"
    print "// keeping the temperature steady enough to print, right?"
    print "// ./createTemperatureLookup.py --r0=%s --t0=%s --r1=%s --r2=%s --beta=%s --max-adc=%s" % (r0, t0, r1, r2, beta, max_adc)
    print "// r0: %s" % (r0)
    print "// t0: %s" % (t0)
    print "// r1: %s" % (r1)
    print "// r2: %s" % (r2)
    print "// beta: %s" % (beta)
    print "// max adc: %s" % (max_adc)
    print "#define NUMTEMPS %s" % (len(adcs))
    print "// {ADC, temp*4 }, // temp"
    print "uint16_t temptable[NUMTEMPS][2] PROGMEM = {"
    counter = 0
    for adc in adcs:
        counter = counter +1
        # The last row must not end with a comma to stay a valid C initialiser.
        if counter == len(adcs):
            print "   {%s, %s} // %s C" % (adc, int(t.temp(adc)*4), t.temp(adc))
        else:
            print "   {%s, %s}, // %s C" % (adc, int(t.temp(adc)*4), t.temp(adc))
    print "};"
def usage():
    # The module docstring doubles as the command-line help text.
    print __doc__
if __name__ == "__main__":
    # Strip the program name before handing args to the option parser.
    main(sys.argv[1:])
|
gpl-2.0
| -2,905,846,211,853,608,000
| 39.044872
| 154
| 0.638547
| false
| 2.829257
| false
| false
| false
|
jtraver/dev
|
python/graphics/circles8.py
|
1
|
24535
|
#!/usr/bin/python
# http://mcsp.wartburg.edu/zelle/python/graphics.py
# https://mcsp.wartburg.edu/zelle/python/graphics/graphics/index.html
import math
from graphics import *
XSCALE = 2550
YSCALE = 1310
XCENTER = XSCALE / 2
YCENTER = YSCALE / 2
# https://en.wikipedia.org/wiki/Incircle_and_excircles_of_a_triangle#Trilinear_coordinates
# {\displaystyle \left({\frac {ax_{a}+bx_{b}+cx_{c}}{a+b+c}},{\frac {ay_{a}+by_{b}+cy_{c}}{a+b+c}}\right)={\frac {a\left(x_{a},y_{a}\right)+b\left(x_{b},y_{b}\right)+c\left(x_{c},y_{c}\right)}{a+b+c}}.}
# {ax_{a}+bx_{b}+cx_{c}}{a+b+c}},{{ay_{a}+by_{b}+cy_{c}}{a+b+c}}
def circles5(win, scale):
white1 = color_rgb(255, 255, 255)
black1 = color_rgb(0, 0, 0)
# win.setBackground("black")
win.setBackground(black1)
red1 = color_rgb(255, 0, 0)
green1 = color_rgb(0, 255, 0)
blue1 = color_rgb(0, 0, 255)
print "red1 = %s" % str(red1)
print "green1 = %s" % str(green1)
print "blue1 = %s" % str(blue1)
rb_magenta1 = color_rgb(255, 0, 255)
gb_cyan1 = color_rgb(0, 255, 255)
rg_yellow1 = color_rgb(255, 255, 0)
rm_rose1 = color_rgb(255, 0, 127)
bm_violet1 = color_rgb(127, 0, 255)
bc_azure1 = color_rgb(0, 127, 255)
gc_green1 = color_rgb(0, 255, 127)
gy_chart1 = color_rgb(127, 255, 0)
ry_orange1 = color_rgb(255, 127, 0)
# red magenta blue cyan green yellow
# rose violet azure spring-green chartreuse orange
radius1 = 10 * scale
diameter1 = radius1 * 2
npoints = 6
inc1 = (math.pi * 2) / npoints
theta1 = 0
xs = []
ys = []
xa = XCENTER * diameter1
ya = YCENTER * diameter1
for i1 in range(npoints):
x1 = (math.sin(theta1) * diameter1) + XCENTER
xs.append(x1)
y1 = (math.cos(theta1) * diameter1) + YCENTER
ys.append(y1)
theta1 += inc1
# draw the "bottom" layer first
## rm_rose1 = color_rgb(255, 0, 127)
# bm_violet1 = color_rgb(127, 0, 255)
## bc_azure1 = color_rgb(0, 127, 255)
# gc_green1 = color_rgb(0, 255, 127)
## gy_chart1 = color_rgb(127, 255, 0)
# ry_orange1 = color_rgb(255, 127, 0)
# # red magenta blue cyan green yellow
# # rose violet azure spring-green chartreuse orange
xb4 = xs[5] * diameter1
yb4 = ys[5] * diameter1
xc4 = xs[0] * diameter1
yc4 = ys[0] * diameter1
x4 = (xa + xb4 + xc4) / (3 * diameter1)
y4 = (ya + yb4 + yc4) / (3 * diameter1)
c4 = Circle(Point(x4, y4), 10 * scale)
# c4.setOutline(bm_violet1)
# c4.setOutline(gc_green1)
c4.setOutline(ry_orange1)
c4.setFill(ry_orange1)
c4.setWidth(4)
c4.draw(win)
xb5 = xs[1] * diameter1
yb5 = ys[1] * diameter1
xc5 = xs[2] * diameter1
yc5 = ys[2] * diameter1
x5 = (xa + xb5 + xc5) / (3 * diameter1)
y5 = (ya + yb5 + yc5) / (3 * diameter1)
c5 = Circle(Point(x5, y5), 10 * scale)
c5.setOutline(bm_violet1)
c5.setFill(bm_violet1)
c5.setWidth(4)
c5.draw(win)
xb6 = xs[3] * diameter1
yb6 = ys[3] * diameter1
xc6 = xs[4] * diameter1
yc6 = ys[4] * diameter1
x6 = (xa + xb6 + xc6) / (3 * diameter1)
y6 = (ya + yb6 + yc6) / (3 * diameter1)
c6 = Circle(Point(x6, y6), 10 * scale)
c6.setOutline(gc_green1)
c6.setFill(gc_green1)
c6.setWidth(4)
c6.draw(win)
# https://en.wikipedia.org/wiki/Color_wheel
# https://en.wikipedia.org/wiki/File:Color_star-en_(tertiary_names).svg
# red purple blue green yellow orange
# magenta, violet, teal, chartreuse, amber, vermilion
# c0.setOutline("red") #FF0000
# c0.setOutline("purple") #A020F0
# c0.setOutline("blue") #0000FF
# c0.setOutline("green") #00FF00
# c0.setOutline("yellow") #FFFF00
# c0.setOutline("orange") #FFA500
# c0.setOutline("magenta") #FF00FF
# c0.setOutline("violet")
# # c0.setOutline("teal") # unknown #008080 https://en.wikipedia.org/wiki/X11_color_names
# c0.setOutline("chartreuse")
# # c0.setOutline("amber") # unknown
# # c0.setOutline("vermilion") # unknown
# https://en.wikipedia.org/wiki/File:RBG_color_wheel.svg
# red magenta blue cyan green yellow
# rose violet azure spring-green chartreuse orange
# c0.setOutline("red") #FF0000
# c0.setOutline("magenta") #FF00FF
# c0.setOutline("blue") #0000FF
# c0.setOutline("cyan") #00FFFF
# c0.setOutline("green") #00FF00
# c0.setOutline("yellow") #FFFF00
# # c0.setOutline("rose") # unknown
# c0.setOutline("pink") #FFC0CB
# c0.setOutline("violet") #EE82EE
# c0.setOutline("azure") #F0FFFF
# c0.setOutline("spring green") #00FF7F
# c0.setOutline("chartreuse") #7FFF00
# c0.setOutline("orange") #FFA500
radius1 = 10 * scale
diameter1 = radius1 * 2
npoints = 6
inc1 = (math.pi * 2) / npoints
theta1 = 0
xs = []
ys = []
# color1 = ["red", "magenta", "blue", "cyan", "green", "yellow"]
color1 = [red1, rb_magenta1, blue1, gb_cyan1, green1, rg_yellow1]
for i1 in range(npoints):
x1 = (math.sin(theta1) * diameter1) + XCENTER
xs.append(x1)
y1 = (math.cos(theta1) * diameter1) + YCENTER
ys.append(y1)
c1 = Circle(Point(x1, y1), 10 * scale)
c1.setOutline(color1[i1])
c1.setFill(color1[i1])
c1.setWidth(4)
c1.draw(win)
theta1 += inc1
c0 = Circle(Point(XCENTER,YCENTER), 10 * scale)
c0.setWidth(4)
# c0.setOutline("white")
c0.setOutline(white1)
c0.setFill(white1)
# c0.setWidth(10)
# c0.setOutline(rm_rose1)
# c0.setOutline(bm_violet1)
# c0.setOutline(bc_azure1)
# c0.setOutline(gc_green1)
# c0.setOutline(gy_chart1)
# c0.setOutline(ry_orange1)
c0.draw(win)
xa = XCENTER * diameter1
ya = YCENTER * diameter1
xb1 = xs[0] * diameter1
yb1 = ys[0] * diameter1
xc1 = xs[1] * diameter1
yc1 = ys[1] * diameter1
x1 = (xa + xb1 + xc1) / (3 * diameter1)
y1 = (ya + yb1 + yc1) / (3 * diameter1)
c1 = Circle(Point(x1, y1), 10 * scale)
# c1.setOutline("pink")
c1.setOutline(rm_rose1)
c1.setFill(rm_rose1)
c1.setWidth(4)
c1.draw(win)
xb2 = xs[2] * diameter1
yb2 = ys[2] * diameter1
xc2 = xs[3] * diameter1
yc2 = ys[3] * diameter1
x2 = (xa + xb2 + xc2) / (3 * diameter1)
y2 = (ya + yb2 + yc2) / (3 * diameter1)
c2 = Circle(Point(x2, y2), 10 * scale)
# c2.setOutline("azure")
c2.setOutline(bc_azure1)
c2.setFill(bc_azure1)
# c2.setWidth(10)
c2.setWidth(4)
c2.draw(win)
# red magenta blue cyan green yellow
# rose violet azure spring-green chartreuse orange
xb3 = xs[4] * diameter1
yb3 = ys[4] * diameter1
xc3 = xs[5] * diameter1
yc3 = ys[5] * diameter1
x3 = (xa + xb3 + xc3) / (3 * diameter1)
y3 = (ya + yb3 + yc3) / (3 * diameter1)
c3 = Circle(Point(x3, y3), 10 * scale)
# c3.setOutline(gc_green1)
c3.setOutline(gy_chart1)
c3.setFill(gy_chart1)
c3.setWidth(4)
c3.draw(win)
def circles4(win, scale):
    """Draw the full uncoloured layout: a centre circle, six tangent ring
    circles one diameter away, and a circle at the incentre (== centroid,
    since the triangles are equilateral) of every adjacent pair.

    Refactored from six hand-unrolled copies of the centroid computation
    into a single loop; draw order is preserved.
    """
    radius1 = 10 * scale
    c0 = Circle(Point(XCENTER, YCENTER), radius1)
    c0.draw(win)
    diameter1 = radius1 * 2
    npoints = 6
    inc1 = (math.pi * 2) / npoints
    xs = []
    ys = []
    theta1 = 0
    for i1 in range(npoints):
        x1 = (math.sin(theta1) * diameter1) + XCENTER
        y1 = (math.cos(theta1) * diameter1) + YCENTER
        xs.append(x1)
        ys.append(y1)
        Circle(Point(x1, y1), radius1).draw(win)
        theta1 += inc1
    # Incentre circles for each pair, in the order the original drew them:
    # alternating pairs first, then the offset pairs.
    for ia, ib in ((0, 1), (2, 3), (4, 5), (5, 0), (1, 2), (3, 4)):
        cx = (XCENTER + xs[ia] + xs[ib]) / 3.0
        cy = (YCENTER + ys[ia] + ys[ib]) / 3.0
        Circle(Point(cx, cy), radius1).draw(win)
def circles3(win, scale):
    """Draw a centre circle, six tangent ring circles, and a circle at the
    incentre (== centroid) of three alternating adjacent pairs.

    Refactored from three hand-unrolled copies of the centroid computation
    into a single loop; draw order is preserved.
    """
    radius1 = 10 * scale
    c0 = Circle(Point(XCENTER, YCENTER), radius1)
    c0.draw(win)
    diameter1 = radius1 * 2
    npoints = 6
    inc1 = (math.pi * 2) / npoints
    xs = []
    ys = []
    theta1 = 0
    for i1 in range(npoints):
        x1 = (math.sin(theta1) * diameter1) + XCENTER
        y1 = (math.cos(theta1) * diameter1) + YCENTER
        xs.append(x1)
        ys.append(y1)
        Circle(Point(x1, y1), radius1).draw(win)
        theta1 += inc1
    # Incentre circles for alternating adjacent pairs only.
    for ia, ib in ((0, 1), (2, 3), (4, 5)):
        cx = (XCENTER + xs[ia] + xs[ib]) / 3.0
        cy = (YCENTER + ys[ia] + ys[ib]) / 3.0
        Circle(Point(cx, cy), radius1).draw(win)
def circles2(win, scale):
    """Draw a centre circle plus a ring of six tangent circles, each one
    centre-to-centre diameter away from the middle of the window."""
    Circle(Point(XCENTER, YCENTER), 10 * scale).draw(win)
    spacing = (10 * scale) * 2  # centre-to-centre distance = one diameter
    step = (math.pi * 2) / 6
    for k in range(6):
        angle = step * k
        px = math.sin(angle) * spacing + XCENTER
        py = math.cos(angle) * spacing + YCENTER
        Circle(Point(px, py), 10 * scale).draw(win)
def circles1(win, xoffset, yoffset, scale=1.0):
    """Draw a centre circle plus six circles centred on the corners of a
    hexagon around the scaled offset point.

    Refactored from six hand-unrolled Circle calls into a loop over the
    corner offsets (same offsets hex1 uses for its polygon vertices).
    """
    sxoffset = xoffset * scale + XCENTER
    syoffset = yoffset * scale + YCENTER
    # Centre circle is anchored at the window centre, as in the original.
    Circle(Point(XCENTER, YCENTER), 10 * scale).draw(win)
    for dx, dy in ((-4, -7), (4, -7), (8, 0), (4, 7), (-4, 7), (-8, 0)):
        Circle(Point(dx * scale + sxoffset, dy * scale + syoffset),
               10 * scale).draw(win)
def main():
    """Open the drawing window, render the circles5 figure, and wait for a
    mouse click before closing.

    Cleaned up: removed the unused `radius` local, the shadowed
    `scale = 0.5` assignment, and the commented-out experiments.
    """
    scale = 10.0
    win = GraphWin("circle1", XSCALE, YSCALE)
    win.setCoords(0, 0, XSCALE, YSCALE)
    circles5(win, scale)
    win.getMouse()  # block until the user clicks anywhere in the window
    win.close()
# https://math.stackexchange.com/questions/260096/find-the-coordinates-of-a-point-on-a-circle
# x = rsin(theta), y = rcos(theta)
def circle1(win, xoffset, yoffset, scale=1.0, radius=10.0):
    """Stamp one hexagon at the offset point, then `npoints` more hexagons
    evenly spaced around a circle of *radius* centred there.

    Cleaned up: the original assigned npoints three times in a row
    (10, 1, 100); only the final value was ever used.
    """
    hex1(win, xoffset, yoffset, scale)
    npoints = 100  # hexagon stamps around the circumference
    inc1 = (math.pi * 2) / npoints
    theta1 = 0.0
    for i1 in range(npoints):
        x1 = (math.sin(theta1) * radius) + xoffset
        y1 = (math.cos(theta1) * radius) + yoffset
        hex1(win, x1, y1, scale)
        theta1 += inc1
# math = <module 'math' from '/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/lib-dynload/math.so'>
# acos acos(x) Return the arc cosine (measured in radians) of x.
# acosh acosh(x) Return the inverse hyperbolic cosine of x.
# asin asin(x) Return the arc sine (measured in radians) of x.
# asinh asinh(x) Return the inverse hyperbolic sine of x.
# atan atan(x) Return the arc tangent (measured in radians) of x.
# atan2 atan2(y, x) Return the arc tangent (measured in radians) of y/x. Unlike atan(y/x), the signs of both x and y are considered.
# atanh atanh(x) Return the inverse hyperbolic tangent of x.
# ceil ceil(x) Return the ceiling of x as a float. This is the smallest integral value >= x.
# copysign copysign(x, y) Return x with the sign of y.
# cos cos(x) Return the cosine of x (measured in radians).
# cosh cosh(x) Return the hyperbolic cosine of x.
# degrees degrees(x) Convert angle x from radians to degrees.
# erf erf(x) Error function at x.
# erfc erfc(x) Complementary error function at x.
# exp exp(x) Return e raised to the power of x.
# expm1 expm1(x) Return exp(x)-1. This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x.
# fabs fabs(x) Return the absolute value of the float x.
# factorial factorial(x) -> Integral Find x!. Raise a ValueError if x is negative or non-integral.
# floor floor(x) Return the floor of x as a float. This is the largest integral value <= x.
# fmod fmod(x, y) Return fmod(x, y), according to platform C. x % y may differ.
# frexp frexp(x) Return the mantissa and exponent of x, as pair (m, e). m is a float and e is an int, such that x = m * 2.**e. If x is 0, m and e are both 0. Else 0.5 <= abs(m) < 1.0.
# fsum fsum(iterable) Return an accurate floating point sum of values in the iterable. Assumes IEEE-754 floating point arithmetic.
# gamma gamma(x) Gamma function at x.
# hypot hypot(x, y) Return the Euclidean distance, sqrt(x*x + y*y).
# isinf isinf(x) -> bool Check if float x is infinite (positive or negative).
# isnan isnan(x) -> bool Check if float x is not a number (NaN).
# ldexp ldexp(x, i) Return x * (2**i).
# lgamma lgamma(x) Natural logarithm of absolute value of Gamma function at x.
# log log(x[, base]) Return the logarithm of x to the given base. If the base not specified, returns the natural logarithm (base e) of x.
# log10 log10(x) Return the base 10 logarithm of x.
# log1p log1p(x) Return the natural logarithm of 1+x (base e). The result is computed in a way which is accurate for x near zero.
# modf modf(x) Return the fractional and integer parts of x. Both results carry the sign of x and are floats.
# pow pow(x, y) Return x**y (x to the power of y).
# radians radians(x) Convert angle x from degrees to radians.
# sin sin(x) Return the sine of x (measured in radians).
# sinh sinh(x) Return the hyperbolic sine of x.
# sqrt sqrt(x) Return the square root of x.
# tan tan(x) Return the tangent of x (measured in radians).
# tanh tanh(x) Return the hyperbolic tangent of x.
# trunc trunc(x:Real) -> Integral Truncates x to the nearest Integral toward 0. Uses the __trunc__ magic method.
# math.pi = 3.14159265359
# math.e = 2.71828182846
# phi = 1.61803398875
def hex1(win, xoffset, yoffset, scale = 1.0):
    """Draw a single flat-topped hexagon on *win*.

    (xoffset, yoffset) is the hex centre in unscaled grid units; it is
    multiplied by *scale* and shifted to the window centre (XCENTER, YCENTER).
    Unscaled geometry: side length 8, half-height 7, side bulge 4.
    """
    cx = xoffset * scale + XCENTER
    cy = yoffset * scale + YCENTER
    # Vertex offsets around the centre, in drawing order.
    corners = [(-4, -7), (4, -7), (8, 0), (4, 7), (-4, 7), (-8, 0)]
    points = [Point(dx * scale + cx, dy * scale + cy) for dx, dy in corners]
    Polygon(*points).draw(win)
def old_main():
    """Draw concentric rings ("layers") of hexagons around a central hex,
    plus three labelled sample hexes from layer 5 and a red marker/axis.

    Refactor note: the original body was ~80 literal ``hex1`` calls; they are
    deduplicated into coordinate tables drawn in the exact original order.
    """
    scale = 7.7
    win = GraphWin("hex2", XSCALE, YSCALE)
    win.setCoords(0, 0, XSCALE, YSCALE)
    # Hex geometry in unscaled units: one side is 8 long, the vertical
    # rectangle is 14 high, the bulge to either side is 4 -- so adjacent hex
    # centres are offset by (+/-12, +/-7) or (0, +/-14).
    layers = [
        # layer 0: centre (1 hex)
        [(0, 0)],
        # layer 1 (6 hexes): upper right, lower right, bottom, lower left,
        # upper left, top
        [(12, 7), (12, -7), (0, -14), (-12, -7), (-12, 7), (0, 14)],
        # layer 2 (12 hexes): one o'clock round to twelve o'clock
        [(12, 21), (24, 14), (24, 0), (24, -14), (12, -21), (0, -28),
         (-12, -21), (-24, -14), (-24, 0), (-24, 14), (-12, 21), (0, 28)],
        # layer 3 (18 hexes), same clockwise order
        [(12, 35), (24, 28), (36, 21), (36, 7), (36, -7), (36, -21),
         (24, -28), (12, -35), (0, -42), (-12, -35), (-24, -28), (-36, -21),
         (-36, -7), (-36, 7), (-36, 21), (-24, 28), (-12, 35), (0, 42)],
        # layer 4 (24 hexes)
        [(12, 49), (24, 42), (36, 35), (48, 28), (48, 14), (48, 0),
         (48, -14), (48, -28), (36, -35), (24, -42), (12, -49), (0, -56),
         (-12, -49), (-24, -42), (-36, -35), (-48, -28), (-48, -14),
         (-48, 0), (-48, 14), (-48, 28), (-36, 35), (-24, 42), (-12, 49),
         (0, 56)],
    ]
    for layer in layers:
        for xoff, yoff in layer:
            hex1(win, xoff, yoff, scale)
    # Three layer-5 sample hexes annotated with their expected codes:
    # top must be 63 - 1 = 62, lower-right axis 63 - 16 = 47,
    # lower-left axis 63 - 8 = 55.
    labelled = [(0, 70, "62"), (60, -35, "47"), (-60, -35, "55")]
    for xoff, yoff, label in labelled:
        hex1(win, xoff, yoff, scale)
        t = Text(Point(XCENTER + xoff * scale, YCENTER + yoff * scale), label)
        t.draw(win)
    # Mark the centre in red and draw a red axis toward the first layer-1
    # neighbour (upper right).
    p0 = Point(XCENTER, YCENTER)
    p0.setFill("red")
    p0.setOutline("red")
    p0.draw(win)
    p1 = Point(XCENTER + 12 * scale, YCENTER + 7 * scale)
    l1 = Line(p0, p1)
    l1.setFill("red")
    l1.draw(win)
    t = Text(Point(XCENTER, YCENTER), "0")
    t.draw(win)
    win.getMouse()
    win.close()
# Script entry point. ``main`` is not visible in this chunk (only
# ``old_main`` is); presumably it is defined earlier in the file -- confirm.
main()
#
#
# __
#/ \
#\__/
#
# ____
# / \
#/ \
#\ /
# \____/
#
# 5
# __ __
# / \
# 4 3
# / 0 \ 000000
# \ /
# 1 2
# \__ __/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 1 \ 000001
# \ /
# 1 2
# \______/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 2 \ 000010
# \ /
# 1 \ 2
# \__ __/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 3 \ 000011
# \ /
# 1 \ 2
# \______/
# 0
#
# 5
# __ __
# / \
# 4 3
# / 4 \ 000100
# \ /
# 1 / 2
# \__ __/
# 0
#
#
# 5
# ______
# / \
# 4 / \ 3
# / 61 \ 111101
# \ /
# 1 / 2
# \______/
# 0
#
# 5
# ______
# / \
# 4 / \ 3
# / 62 \ 111110
# \ /
# 1 \ / 2
# \__ __/
# 0
#
# 5
# ______
# / \
# 4 / \ 3
# / 63 \ 111111
# \ /
# 1 \ / 2
# \______/
# 0
|
mit
| 1,088,411,825,872,629,100
| 30.33461
| 202
| 0.539352
| false
| 2.628281
| false
| false
| false
|
ucloud/uai-sdk
|
uaitrain/operation/pack_docker_image/pytorch_pack_op.py
|
1
|
1534
|
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from uaitrain.operation.pack_docker_image.base_pack_op import BaseUAITrainDockerImagePackOp
class PytorchUAITrainDockerImagePackOp(BaseUAITrainDockerImagePackOp):
    """Docker-image pack operation specialised for the PyTorch arch."""

    def __init__(self, parser):
        super(PytorchUAITrainDockerImagePackOp, self).__init__(parser)
        self.ai_arch = "pytorch"

    def _gen_gpu_docker_cmd(self, pycmd):
        """Build the nvidia-docker command used to smoke-test the packed
        GPU image: mounts the local test data/output dirs under /data and
        runs *pycmd* with a single GPU.
        """
        template = ('sudo nvidia-docker run -it '
                    '-v {data}:/data/data '
                    '-v {output}:/data/output '
                    '{image} /bin/bash -c '
                    '"cd /data && /usr/bin/python {pycmd} '
                    '--num_gpus=1 --work_dir=/data --data_dir=/data/data '
                    '--output_dir=/data/output --log_dir=/data/output"')
        return template.format(data=self.test_data_path,
                               output=self.test_output_path,
                               image=self.user_gpu_image,
                               pycmd=pycmd)
|
apache-2.0
| 1,090,517,671,928,776,800
| 50.133333
| 178
| 0.614081
| false
| 3.79703
| false
| false
| false
|
Alex9029/awesomeTHz-Python-webapp
|
www/models.py
|
1
|
1470
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Models for user, blog and comment.
'''
__author__ = 'Chalex'
import time, uuid
from transwarp.db import next_id
from transwarp.orm import Model, StringField, BooleanField, FloatField, TextField
class User(Model):
    """A registered user, persisted in the ``users`` table."""
    __table__ = 'users'

    # Primary key generated by next_id() at insert time.
    id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
    # Email is fixed once created (updatable=False).
    email = StringField(updatable=False, ddl='varchar(50)')
    password = StringField(ddl='varchar(50)')
    admin = BooleanField()
    name = StringField(ddl='varchar(50)')
    image = StringField(ddl='varchar(500)')
    # Creation timestamp (seconds since epoch), set once.
    created_at = FloatField(updatable=False, default=time.time)
class Blog(Model):
    """A blog post, persisted in the ``blogs`` table."""
    __table__ = 'blogs'

    id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
    # Author fields are denormalised copies; user_id is fixed once created.
    user_id = StringField(updatable=False, ddl='varchar(50)')
    user_name = StringField(ddl='varchar(50)')
    user_image = StringField(ddl='varchar(500)')
    name = StringField(ddl='varchar(50)')
    summary = StringField(ddl='varchar(200)')
    content = TextField()
    created_at = FloatField(updatable=False, default=time.time)
class Comment(Model):
    """A comment on a blog post, persisted in the ``comments`` table."""
    __table__ = 'comments'

    id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
    # Parent blog and author ids are fixed once created.
    blog_id = StringField(updatable=False, ddl='varchar(50)')
    user_id = StringField(updatable=False, ddl='varchar(50)')
    user_name = StringField(ddl='varchar(50)')  # stray trailing ';' removed
    user_image = StringField(ddl='varchar(500)')
    content = TextField()
    created_at = FloatField(updatable=False, default=time.time)
|
gpl-2.0
| -2,106,225,807,252,490,500
| 29.645833
| 81
| 0.710884
| false
| 3.043478
| false
| false
| false
|
eubr-bigsea/tahiti
|
migrations/versions/c6x2kllv52os_sklearn_operations.py
|
1
|
5027
|
# -*- coding: utf-8 -*-
"""Adding Scikit-learn Operations
Revision ID: c6x2kllv52os
Revises: bca9291ljsj5
Create Date: 2018-06-14 10:42:09.555626
"""
from alembic import context
from alembic import op
from sqlalchemy import String, Integer, Text
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import table, column,text
# revision identifiers, used by Alembic.
revision = 'c6x2kllv52os'
down_revision = 'bca9291ljsj5'
branch_labels = None
depends_on = None
def _insert_platform():
    """Insert the Scikit-learn platform row (id=4, initially no icon)."""
    platform_table = table(
        'platform',
        column('id', Integer),
        column('slug', String),
        column('enabled', Integer),
        column('icon', String), )
    rows = [
        {'id': 4, 'slug': 'scikit-learn', 'enabled': 1, 'icon': ''},
    ]
    op.bulk_insert(platform_table, rows)
def _insert_platform_translation():
    """Insert English and Portuguese display strings for platform id=4."""
    translation_table = table(
        'platform_translation',
        column('id', Integer),
        column('locale', String),
        column('name', String),
        column('description', String), )
    rows = [
        {'id': 4, 'locale': 'en', 'name': 'Scikit-learn',
         'description': 'Scikit-learn 0.19.1'},
        {'id': 4, 'locale': 'pt', 'name': 'Scikit-learn',
         'description': 'Scikit-learn 0.19.1'},
    ]
    op.bulk_insert(translation_table, rows)
def _add_operations_platform_from_spark():
    """Associate the existing (Spark) operations with platform id=4.

    NOTE(review): operation id 41 appears twice below (commented both as
    'Feature indexer' and 'Feature Assembler'), so the pair (41, 4) is
    inserted twice, exactly as the original migration did -- confirm whether
    the second occurrence was meant to be a different operation id.
    """
    assoc_table = table(
        'operation_platform',
        column('operation_id', Integer),
        column('platform_id', Integer))
    operation_ids = [
        3001,   # data-reader
        30,     # data-writer
        24,     # add-columns
        12,     # add-rows
        15,     # aggregation
        21,     # clean-missing
        37,     # difference
        3014,   # drop
        5,      # filter
        16,     # join
        6,      # projection
        23,     # remove-duplicated-rows
        27,     # replace-value
        28,     # sample
        13,     # set-intersection
        32,     # sort
        17,     # split
        7,      # transformation
        25,     # comment
        3031,   # read-shapefile
        55,     # within
        41,     # Feature indexer
        92,     # Max-abs scaler
        91,     # Min-max scaler
        90,     # Standard scaler
        75,     # One Hot Encoder
        41,     # Feature Assembler (duplicate of 41 -- see docstring)
        95,     # PCA
        3026,   # Load model
        3027,   # Save model
        42,     # Apply model
        73,     # Regression Model
        78,     # Random Forest Regressor
        8,      # Linear Regression
        74,     # IsotonicRegression
        49,     # tokenizer
        50,     # remove-stop-words
        51,     # generate-n-grams
        52,     # word-to-vector
        10,     # clustering-model
        56,     # gaussian-mixture-clustering
        29,     # k-means-clustering
        48,     # lda-clustering
        1,      # classification-model
        4,      # naive-bayes-classifier
        9,      # svm-classification
        3005,   # knn-classifier
        3008,   # logistic-regression
        3,      # fp-growth
        85,     # association rule
        86,     # sequence mining
        26,     # publish-as-visualization
        35,     # table-visualization
        68,     # line-chart
        69,     # bar-chart
        70,     # pie-chart
        71,     # area-chart
        80,     # scatter-plot
        81,     # summary-statistics
        88,     # map-chart
        89,     # donut-chart
    ]
    rows = [{'operation_id': op_id, 'platform_id': 4}
            for op_id in operation_ids]
    op.bulk_insert(assoc_table, rows)
# (forward, reverse) pairs executed by upgrade()/downgrade().  Each element
# may be a callable, a raw SQL string, or a list of SQL strings.
all_commands = [
    (_insert_platform, 'DELETE FROM platform WHERE id = 4' ),
    (_insert_platform_translation,
     'DELETE FROM platform_translation WHERE id = 4'),
    (_add_operations_platform_from_spark,
     'DELETE FROM operation_platform WHERE platform_id = 4'),
]
def upgrade():
    """Apply the forward half of every entry in ``all_commands``.

    Rolls the session back and re-raises on any failure; commits only when
    every command has run.
    """
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        for forward, _ in all_commands:
            if isinstance(forward, str):
                connection.execute(forward)
            elif isinstance(forward, list):
                for statement in forward:
                    connection.execute(statement)
            else:
                forward()
    except:  # bare except kept on purpose: roll back on *any* failure
        session.rollback()
        raise
    session.commit()
def downgrade():
    """Apply the reverse half of every entry in ``all_commands``, in
    reverse order.

    Rolls the session back and re-raises on any failure; commits only when
    every command has run.
    """
    ctx = context.get_context()
    session = sessionmaker(bind=ctx.bind)()
    connection = session.connection()
    try:
        for _, reverse in reversed(all_commands):
            if isinstance(reverse, str):
                connection.execute(reverse)
            elif isinstance(reverse, list):
                for statement in reverse:
                    connection.execute(statement)
            else:
                reverse()
    except:  # bare except kept on purpose: roll back on *any* failure
        session.rollback()
        raise
    session.commit()
|
apache-2.0
| -6,946,335,271,519,121,000
| 25.046632
| 64
| 0.527153
| false
| 3.429059
| false
| false
| false
|
MizzouCMMSGroup1/ChromosomeModel
|
src/conjugate_gradient_plot.py
|
1
|
4967
|
# some imports we use
import numpy
import random
import math
from scipy import optimize
# matplot lib
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
# our data dimension (*xyz), we are using megabase resolution
NUMBER_CONTACTS = 157
NUMBER_CONTACTS_POINTS = NUMBER_CONTACTS * 3
# bring in our cleaned data
IF_FILENAME = "if_data_stripped.csv"
if_data_raw = numpy.loadtxt(IF_FILENAME, delimiter=',')
# used to normalize our IF weights
IF_TOTAL = numpy.sum(if_data_raw)
# chromosome 7 weighting scores from paper, megabase
W1 = 1.0
W2 = 1.5
W3 = 1.5
W4 = 1.5
# micrometers
d_sq_min = 0.2
da_sq_max = 1.8
d_max = 4.5
d_sq_c = 7.0
d_sq_max = d_max * d_max # not defined in paper?
# some globals
coordinate_data = numpy.zeros(NUMBER_CONTACTS_POINTS)
def setup_testing(number_contacts=157):
    """Shrink the problem to the first *number_contacts* contacts.

    Rebinds the module globals that size the optimization: the IF matrix
    slice, its normalization total, and the flat xyz coordinate array.
    """
    # 157 is our default/actual size of c7
    global if_data, IF_TOTAL
    global NUMBER_CONTACTS, NUMBER_CONTACTS_POINTS
    # Bug fix: coordinate_data was previously assigned to a *local* (no
    # ``global`` declaration), so the module-level array was never resized.
    global coordinate_data
    NUMBER_CONTACTS = number_contacts
    NUMBER_CONTACTS_POINTS = NUMBER_CONTACTS * 3
    if_data = if_data_raw[0:number_contacts, 0:number_contacts]
    IF_TOTAL = numpy.sum(if_data)
    # One xyz triple per contact (the original sized this to
    # number_contacts, a third of the needed length).
    coordinate_data = numpy.zeros(NUMBER_CONTACTS_POINTS)
def init_model(bounding_box=0.5):
    """Scatter the first NUMBER_CONTACTS_POINTS coordinates uniformly in
    [-bounding_box/2, bounding_box/2) and return the coordinate array."""
    global coordinate_data
    samples = [bounding_box * (0.5 - random.random())
               for _ in range(NUMBER_CONTACTS_POINTS)]
    coordinate_data[:NUMBER_CONTACTS_POINTS] = samples
    return coordinate_data
def print_model():
    """Dump the current flat coordinate array to stdout (debug helper)."""
    global coordinate_data
    print coordinate_data
def max_if(i, j):
    """Symmetrised interaction frequency for contacts (i, j): the larger of
    the two directed matrix entries."""
    forward = if_data[i, j]
    backward = if_data[j, i]
    return forward if forward >= backward else backward
def distance_sq(i,j):
    """Squared Euclidean distance between two points read from the flat
    coordinate array.

    NOTE(review): the array holds flattened xyz triples, which suggests
    contact ``i`` should live at indices ``3*i .. 3*i+2``; indexing with
    ``i, i+1, i+2`` makes neighbouring "contacts" share coordinates.  The
    same convention is used consistently in main(), so confirm intent
    before changing.
    """
    a = [coordinate_data[i], coordinate_data[i+1], coordinate_data[i+2]]
    b = [coordinate_data[j], coordinate_data[j+1], coordinate_data[j+2]]
    return (a[0] - b[0])**2 + (a[1] - b[1])**2 + (a[2] - b[2])**2
def contact_score():
    '''
    minimize the distance (but keep above min_threshold) between non-sequential pairs that have affinity
    '''
    global IF_TOTAL
    score = 0
    for i in range(0, NUMBER_CONTACTS):
        for j in range(0, NUMBER_CONTACTS):
            # Bug fix: the original condition ``i != j or abs(i-j) != 1`` is
            # true for every (i, j) -- including i == j -- so nothing was
            # ever excluded.  Per the docstring this term is for
            # non-sequential pairs; adjacent pairs are scored separately by
            # pair_smoothing().
            if i != j and abs(i - j) != 1:
                d_sq_ij = distance_sq(i, j)
                score += W1 * math.tanh(d_sq_c - d_sq_ij) * max_if(i, j) + W2 * math.tanh(d_sq_ij - d_sq_min) / IF_TOTAL
    return score
def noncontact_score():
    '''
    maximize the distance (but keep below max_threshold) between non-sequential pairs that don't have affinity
    '''
    global IF_TOTAL
    score = 0
    for i in range(0, NUMBER_CONTACTS):
        for j in range(0, NUMBER_CONTACTS):
            # Bug fix: ``i != j or abs(i-j) != 1`` was a tautology (see
            # contact_score); restrict to genuinely non-sequential,
            # non-identical pairs as the docstring states.
            if i != j and abs(i - j) != 1:
                d_sq_ij = distance_sq(i, j)
                score += W3 * math.tanh(d_sq_max - d_sq_ij) / IF_TOTAL + W4 * math.tanh(d_sq_ij - d_sq_c) / IF_TOTAL
    return score
def pair_smoothing():
    '''
    keep adjacent contacts (eg |i-j|==1) with slightly lower score than above so they are prioritized for optimization
    '''
    global IF_TOTAL
    score = 0
    # NOTE: the double loop visits each adjacent pair twice ((i, j) and
    # (j, i)); both terms are symmetric, so each pair contributes double.
    # Kept as-is to preserve the exact floating-point summation order.
    for i in range(0,NUMBER_CONTACTS):
        for j in range(0,NUMBER_CONTACTS):
            if abs(i-j) == 1:
                d_sq_ij = distance_sq(i,j)
                score += W1 * max_if(i,j) * math.tanh(da_sq_max - d_sq_ij) + W2 * math.tanh(d_sq_ij - d_sq_min) / IF_TOTAL
    return score
def model_score():
    """Total objective: sum of the contact, non-contact and smoothing terms."""
    parts = (contact_score(), noncontact_score(), pair_smoothing())
    return sum(parts)
# shim between skeleton and cg code
iter_tracker = 0
old_score = 0
def f(x, *args):
    """Objective callback for scipy.optimize.minimize.

    Copies the optimizer's flat parameter vector *x* into the module-level
    ``coordinate_data`` array, then scores the model.  Also tracks the call
    count and last score in module globals for progress reporting.
    """
    #print x
    global iter_tracker, old_score
    iter_tracker += 1
    global coordinate_data
    # Copy the candidate solution into the shared coordinate buffer so the
    # scoring functions (which read the global) see it.
    for i in range(0,NUMBER_CONTACTS_POINTS):
        coordinate_data[i] = x[i]
    current_score = model_score()
    #print "iter:", iter_tracker, "score:", current_score, "change:", current_score - old_score
    old_score = current_score
    return current_score
def main():
    """Optimize a 50-contact model with conjugate gradient and plot the
    resulting 3D curve with matplotlib."""
    global iter_tracker
    # Shrink to 50 contacts so the run finishes quickly.
    setup_testing(50)
    TESTING_CONGUGATE_GRADIENT = True
    #TESTING_CONGUGATE_GRADIENT = False
    random_start = init_model().copy()
    args = []
    opts = {'maxiter' : 100, 'disp' : True }
    results = 0
    if (TESTING_CONGUGATE_GRADIENT):
        results = optimize.minimize(f, random_start, args=args, method='CG', options=opts)
    else:
        results = optimize.minimize(f, random_start, args=args, method='Anneal', options=opts)
    print "internal iter: ", iter_tracker
    print results
    print "saving final contact xyz coordinates"
    x = numpy.zeros(NUMBER_CONTACTS)
    y = numpy.zeros(NUMBER_CONTACTS)
    z = numpy.zeros(NUMBER_CONTACTS)
    # NOTE(review): results.x is a flat xyz vector; indexing contact i with
    # i, i+1, i+2 (rather than 3*i, 3*i+1, 3*i+2) matches distance_sq() but
    # looks like an off-by-stride bug -- confirm before changing either.
    for i in range(0,NUMBER_CONTACTS):
        x[i] = results.x[i]
        y[i] = results.x[i+1]
        z[i] = results.x[i+2]
        print results.x[i], results.x[i+1], results.x[i+2]
    mpl.rcParams['legend.fontsize'] = 10
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot(x, y, z, label='3d plot of generated contacts')
    ax.legend()
    plt.show()
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
gpl-2.0
| 1,627,960,150,278,846,200
| 25.280423
| 122
| 0.624723
| false
| 3.041641
| false
| false
| false
|
wrgeorge1983/Pystol
|
_iactive.py
|
1
|
15584
|
#! /usr/bin/python
"""
Created on Mar 26, 2015
@author: William.George
Credit to /r/Python for the non-wasteful and sensible handling of oldInit and
newInit
"""
# Standard Library Imports
import os
import sys
import json
from pprint import pprint # Not used here, but we want it in interactive mode.
import time
from subprocess import Popen
from collections import defaultdict
import multiprocessing.pool
import collections
sys.path += [os.getcwd()]
# Imports from other modules in this project
import sshutil
# Imports from third party modules
import phpipam
import ipaddress
import openpyxl
DEFAULT_SW_IP = '10.10.10.10'
DEFAULT_HOST_IP = '10.10.10.10'
DEFAULT_IPAM_HOST = 'ipam'
DEFAULT_IPAM_API_ID = 'ipam'
DEFAULT_IPAM_API_KEY = 'FFFFF'
try:
import iactiveconstants
DEFAULT_SW_IP = iactiveconstants.DEFAULT_SW_IP
DEFAULT_HOST_IP = iactiveconstants.DEFAULT_HOST_IP
DEFAULT_IPAM_HOST = iactiveconstants.DEFAULT_IPAM_HOST
DEFAULT_IPAM_API_ID = iactiveconstants.DEFAULT_IPAM_API_ID
DEFAULT_IPAM_API_KEY = iactiveconstants.DEFAULT_IPAM_API_KEY
except ImportError:
pass
# File class from user fdb on StackOverflow
# http://stackoverflow.com/questions/5896079/python-head-tail-and-backward-read-by-lines-of-a-text-file
class File(file):
    """ An helper class for file reading """

    def __init__(self, *args, **kwargs):
        super(File, self).__init__(*args, **kwargs)
        # Chunk size used by tail()/backward() when scanning from the end.
        self.BLOCKSIZE = 4096

    def head(self, lines_2find=1):
        """Return the first *lines_2find* lines (rewinds the file)."""
        self.seek(0)                            # Rewind file
        return [super(File, self).next() for x in xrange(lines_2find)]

    def tail(self, lines_2find=1):
        """Return the last *lines_2find* lines, scanning backwards in
        BLOCKSIZE chunks until enough newlines have been seen."""
        self.seek(0, 2)                         # Go to end of file
        bytes_in_file = self.tell()
        lines_found, total_bytes_scanned = 0, 0
        # Need lines_2find+1 newlines so the oldest wanted line is complete.
        while (lines_2find + 1 > lines_found and
               bytes_in_file > total_bytes_scanned):
            byte_block = min(
                self.BLOCKSIZE,
                bytes_in_file - total_bytes_scanned)
            self.seek(-(byte_block + total_bytes_scanned), 2)
            total_bytes_scanned += byte_block
            # NOTE(review): reads BLOCKSIZE here even when byte_block is
            # smaller -- presumably harmless near BOF, but confirm.
            lines_found += self.read(self.BLOCKSIZE).count('\n')
        self.seek(-total_bytes_scanned, 2)
        line_list = list(self.readlines())
        return line_list[-lines_2find:]

    def backward(self):
        """Yield lines from the end of the file toward the beginning."""
        self.seek(0, 2)                         # Go to end of file
        blocksize = self.BLOCKSIZE
        last_row = ''
        while self.tell() != 0:
            try:
                self.seek(-blocksize, 1)
            except IOError:
                # Fewer than blocksize bytes remain; shrink to what's left.
                blocksize = self.tell()
                self.seek(-blocksize, 1)
            block = self.read(blocksize)
            self.seek(-blocksize, 1)
            rows = block.split('\n')
            # The block's last fragment continues the previously-held row.
            rows[-1] = rows[-1] + last_row
            while rows:
                last_row = rows.pop(-1)
                # Hold back the first fragment of the block -- it may be
                # completed by the next (earlier) block.
                if rows and last_row:
                    yield last_row
        yield last_row
def ipm(site, ipt):
    """
    ipm(site, ipt):
        site: An IP or Network address in dotted-decimal in a string.
            e.g. "10.10.8.6" or "10.10.0.0"
        ipt: 'input', trailing octets to be merged with site
            as string: e.g. "7" or "9.1.3"
            or as int or float: e.g. 7 or 3.8
    Returns: trailing octets defined by ipt super-imposed on site
        e.g. ipm("10.10.8.6", "1.2.3") == "10.1.2.3"
             ipm("10.10.8.6", 5.1)     == "10.10.5.1"

    Note: This works purely on the dotted-decimal *text* -- it does not
    perform any real subnetting arithmetic.
    """
    tail = str(ipt).split('.')
    head = site.split('.')[:4 - len(tail)]
    return '.'.join(head + tail)
# TODO: This should really be wrapped in a class
def pull_subnets():
    """Fetch the raw subnet records from the default phpIPAM instance over
    HTTPS and return the decoded ``data`` list."""
    ipam = phpipam.PHPIPAM(DEFAULT_IPAM_HOST, DEFAULT_IPAM_API_ID,
                           DEFAULT_IPAM_API_KEY)
    ipam.scheme = 'https'
    response = json.loads(ipam.read_subnets())
    return response['data']
def site_lookup(sfilter):
    """Return subnet records whose description contains *sfilter*.

    NOTE(review): the last record is excluded (the original iterated
    ``range(0, len(subnets) - 1)``); preserved here, but confirm it is
    intentional.
    """
    subnets = pull_subnets()
    return [net for net in subnets[:-1] if sfilter in net['description']]
class IPAMController(object):
    """Generic wrapper for JSON objects returned by ipam api"""

    def __init__(self, ipam, data=None, **kwargs):
        """Takes either the JSON data by itself or unpacked keywords.
           if unpacked values are passed, ensure only the 'data' portion
           of the result is sent. i.e.:
               rslt = ipam.read_subnets(id=1)
               rslt = json.loads(rslt)['data']
               subnet
        """
        # Keep a handle on the API client for lazy follow-up queries.
        self.ipam = ipam
        if data is not None:
            kwargs = json.loads(data)['data']
            # Unsure if this is consistent or not, but I've seen it at least once
            if type(kwargs) is list:
                kwargs = kwargs[0]
        # Promote every JSON field to an instance attribute.
        for k, v in kwargs.items():
            setattr(self, k, v)
class IPAMSubnet(IPAMController):
    """Wrap subnet JSON objects that come from phpipam"""

    def __init__(self, **kwargs):
        IPAMController.__init__(self, **kwargs)
        # self.subnet / self.mask come from the JSON payload via the base
        # class; combine them into an ipaddress network object.
        net, mask = self.subnet, self.mask
        try:
            self.network = ipaddress.ip_network(u'{0}/{1}'.format(net, mask))
        except ValueError:
            # Malformed subnet/mask from the API; keep a sentinel string.
            self.network = 'INVALID'
        self._site_codes = []

    def _pull_site_codes(self):
        """Query all addresses in this subnet and cache the set of 3-char
        site codes embedded in their DNS names (characters 5..7)."""
        subnet_id = self.id
        addresses = self.ipam.generic('addresses', 'read', subnetId=subnet_id, format='ip')
        addresses = json.loads(addresses)['data']
        names = (x['dns_name'] for x in addresses)
        site_codes = (x[5:8] for x in names)
        self._site_codes = set(site_codes)

    @property
    def site_codes(self):
        # Lazily populated on first access.
        if len(self._site_codes) == 0:
            self._pull_site_codes()
        return self._site_codes

    def __str__(self):
        return str(self.network)
class IPAM(phpipam.PHPIPAM):
    """Handle subnets and addresses meaningfully"""

    def __init__(self,
                 url=DEFAULT_IPAM_HOST,
                 api_id=DEFAULT_IPAM_API_ID,
                 api_key=DEFAULT_IPAM_API_KEY,
                 scheme='https'):
        phpipam.PHPIPAM.__init__(self, url, api_id, api_key)
        self.scheme = scheme
        # Lazy caches, filled on first property access.
        self._subnets = None
        self._raw_subnets = None
        self._addresse = None  # NOTE(review): never read; probable typo for _addresses

    def _pull_raw_subnets(self):
        """Fetch and cache the raw subnet JSON records."""
        rslt = self.read_subnets()
        jload = json.loads(rslt)
        self._raw_subnets = jload['data']

    @property
    def raw_subnets(self):
        if self._raw_subnets is None:
            self._pull_raw_subnets()
        return self._raw_subnets

    def _pull_subnets(self):
        """Wrap each raw record in an IPAMSubnet, keyed by its ipam id."""
        self._subnets = {}
        for subnet in self.raw_subnets:
            self._subnets[subnet[u'id']] = IPAMSubnet(ipam=self, **subnet)

    @property
    def subnets(self, subnet_id=None):
        """access one or all subnets"""
        # NOTE(review): a property getter can never receive subnet_id, so
        # the single-subnet branch below is unreachable -- confirm whether
        # this was meant to be a plain method.
        if self._subnets is None:
            self._pull_subnets()
        if subnet_id is not None:
            return self._subnets[subnet_id]
        return self._subnets

    def audit_subnets(self):
        """Re-parse every subnet as an ip_network; return False (and print
        the error) if any record is malformed."""
        rslt = True
        for subnet in self.subnets.values():
            try:
                net, mask = subnet.subnet, subnet.mask
                subnet.network = ipaddress.ip_network(u'{0}/{1}'.format(net, mask))
            except ValueError as e:
                rslt = False
                print e
        return rslt
# Wrapps Switch() with features that are great for interactive access,
# but would be terrible to use in an normal script.
class clintSwitch(sshutil.Switch):
    """Interactive-friendly Switch: remembers credentials, timeout and the
    last full IP on the *class*, so later instances can be created from
    just the trailing octets (via ipm)."""

    def __init__(self, ip=None, creds=None, timeout=None):
        if timeout:
            self.timeout = timeout
        elif not hasattr(self, 'timeout'):
            self.timeout = None
        if creds:
            # Stored on the class: shared by all later instances.
            clintSwitch.credentials = creds
        else:
            if not hasattr(self, "credentials"):
                raise SyntaxError("Credentials must be provided at least once.")
            creds = self.credentials
        if ip:
            ip = str(ip)
            site = ip
            ips = ip.split('.')
            if len(ips) == 4:
                # A full IP establishes the "site" for partial IPs later.
                clintSwitch.site = site
            else:
                if not hasattr(self, 'site'):
                    raise SyntaxError("Full IP must be provided at least once.")
                # Merge the partial octets onto the remembered site.
                ip = ipm(clintSwitch.site, ip)
                clintSwitch.site = ip
        else:
            ip = 'None'
        sshutil.Switch.__init__(self, ip, creds)

    @property
    def flash_total(self):
        # Best-effort: 'UNK' if flash info is unavailable.
        try:
            return self.flash.total
        except:
            return 'UNK'

    @property
    def flash_free(self):
        # Best-effort: 'UNK' if flash info is unavailable.
        try:
            return self.flash.free
        except:
            return 'UNK'

    def pexecute(self, cmd, trim=True, timeout=None):
        """execute() with the instance's default timeout, printing output."""
        args = [cmd, trim]
        if not timeout:
            timeout = self.timeout
        if timeout:
            args.append(timeout)
        print self.execute(*args)

    def interact(self):
        """Hand the terminal over to an interactive ssh session."""
        cmd = 'ssh {0}'.format(self.ip)
        Popen(cmd, shell=True).communicate()

    def bufferflush(self):
        return self.connection.buffer_flush()
def poll_switch(sw, cmd, sleep_time):
    """sw.pexecute(cmd) every sleep_time seconds (loops forever; interrupt
    with Ctrl-C)."""
    while True:
        sw.pexecute(cmd)
        time.sleep(sleep_time)
def pythonrc():
    """Return the expanded path to the current user's ``.pythonrc.py``."""
    return '{0}{1}'.format(os.path.expanduser('~/'), '.pythonrc.py')
def retrieve_pcaps(sw):
    """Copy every ``*.pcap`` file from the switch's flash to this host
    over SCP, answering the copy dialog's prompts in order."""
    destcreds = sshutil.get_credentials()
    host = DEFAULT_HOST_IP
    # List flash and keep the last whitespace field of each line (filename).
    lines = sw.execute('sh flash: | i pcap').splitlines()
    files = [line.split()[-1] for line in lines]
    for fil in files:
        command = 'copy {0} scp:'.format(fil)
        sw.timeout = 2
        print 'pulling {0}...'.format(fil)
        # The copy dialog prompts for host, path, filename, then password;
        # each pexecute answers one prompt.
        sw.pexecute(command)
        sw.pexecute(host)
        sw.pexecute('\n')
        sw.pexecute('\n')
        sw.pexecute(destcreds[1], 5)
class WorkbookWrapper(object):
    """Convenience wrapper around an openpyxl workbook used as a switch
    inventory: builds a header index and maps spreadsheet columns onto
    clintSwitch attributes."""

    def __init__(self, filename):
        # openpyxl column indices are 1-based; convert a letter to 0-based.
        self.column_from_string = lambda x: openpyxl.utils.column_index_from_string(x) - 1
        self.wb = self.load_workbook(filename)
        self.ws = self.wb.active
        self.rows = self.ws.rows
        self.columns = self.ws.columns
        self.cell = self.ws.cell
        self.build_header()
        # Unknown headers map to None (ignored by output_values).
        self.attribute_mapping = defaultdict(lambda: None)
        self.attribute_mapping.update(  # maps header fields to object attributes
            # NOTE(review): 'current' appears twice in this literal (the
            # duplicate silently wins); presumably one entry was meant to be
            # a different column name -- confirm.
            {
                'hostname': 'hostname',
                'ip address': 'ip',
                'supervisor': 'supervisor',
                'ram (k)': 'available_ram',
                'total flash': 'flash_total',
                'free flash': 'flash_free',
                'model': 'model',
                'stacked': 'stacked',
                'current': 'software_version',
                'old': 'software_version',
                'current': 'software_version',
                'feature set (license)': 'license'
            }
        )

    def build_header(self):
        """
        Assume header is row A
        :return:
        """
        header_row = self.rows[0]
        header = [(cell.value.lower(), index) for index, cell in enumerate(header_row)
                  if cell.value is not None]
        # Bidirectional map: name -> column index and index -> name.
        self.header = defaultdict(str)
        for (name, index) in header:
            self.header[name] = index
            self.header[index] = name

    def output_values(self, switches):
        """
        Takes switches (for now: manually provided, pre-populated) and outputs their attributes to xlsx.
        :param switches:
        :return:
        """
        am = self.attribute_mapping
        header = self.header
        for row, switch in zip(self.rows[1:], switches):  # skip header row obviously
            # Only write rows for switches that responded.
            if switch.state.upper() != 'UP':
                continue
            for index, cell in enumerate(row):
                try:
                    rslt = getattr(switch, str(am[header[index]]), 'UNK')
                except:
                    rslt = 'UNK'
                if rslt != 'UNK':
                    cell.value = rslt

    # def validate_hostname(self, switch, value):
    #     if switch.hostname == value:
    #         return True, switch.hostname
    #     else:
    #         return False, switch.hostname
    #
    # def validate_supervisor(self, switch, value):
    #     sup = switch.supervisor
    #     return sup == value, sup
    #
    # @staticmethod
    # def validate_switch_attribute(switch, attribute, value):
    #     ref = getattr(switch, attribute)
    #     return ref == value, ref

    @staticmethod
    def load_workbook(filename):
        """
        return an xlsx document
        :param filename: filename of xlsx doc.  Assume it's under ~/stage/
        :return:
        """
        path = os.path.join('~', 'stage', filename)
        path = os.path.expanduser(path)
        wb = openpyxl.load_workbook(path)
        return wb

    def switch_from_row(self, row=None, row_index=None):
        """Build a clintSwitch from one spreadsheet row (or None when the
        row has no 'ip address' cell)."""
        if row is None:
            assert row_index is not None, "switch_from_row expects row or row_index"
            row = self.rows[row_index]
        assert row in self.rows, "row must be an existing row in rows"
        attrib_from_cell = lambda x: self.header[self.column_from_string(x.column)]
        attrs = dict((attrib_from_cell(cell), cell.value) for cell in row
                     if cell.value is not None)
        try:
            switch = clintSwitch(ip=attrs['ip address'])
        except KeyError:
            return None
        # Remember where this switch came from for later write-back.
        switch.row_index = row_index
        return switch

    def switches_from_rows(self):
        return [self.switch_from_row(row=row) for row in self.rows[1:]]  # skip header!

    def get_attribs(self, switch):
        pass
# TODO: These are here only for testing purposes and should be pruned / factored out
def populate_switch(switch):
    """Best-effort populate of a single switch (thread-pool worker).

    Errors are deliberately swallowed so one unreachable switch does not
    abort the whole pool run; callers detect failures via switch.state.
    """
    try:
        switch.populate_lite()
    except Exception:
        # Was a bare ``except:`` -- that also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort contract but let those
        # propagate.
        pass
def test_wb_switches():
    """Load the inventory workbook, populate every switch on a thread pool
    and print progress until all switches have reported.

    Leaves wb/switches/pool/rslts in module globals for interactive poking.
    """
    global wb
    global switches
    global pool
    global rslts
    wb = WorkbookWrapper('bia-netw.xlsx')
    switches = [switch for switch in wb.switches_from_rows() if switch is not None]
    pool = multiprocessing.pool.ThreadPool(processes=32)
    start_time = time.time()
    rslts = pool.map_async(populate_switch, switches)
    # Poll interval shrinks as fewer switches remain outstanding.
    increment_table = {100: 5, 50: 3, 25: 1, 10: 0.5}
    remaining_q = []
    increment = 5
    while True:
        remaining_switches = [switch.ip for switch in switches if switch.state == 'UNK']
        remaining = len(remaining_switches)
        if remaining == 0:
            # NOTE(review): returning here means pool.close()/join() below
            # are never reached -- confirm whether they should run first.
            return
        seconds = time.time() - start_time
        print('{0} remaining after {1} seconds'.format(remaining, seconds))
        # Pick the poll interval for the current backlog size.
        for key in sorted(increment_table.keys()):
            if remaining >= key:
                increment = increment_table[key]
            else:
                break
        if remaining in remaining_q:  # at least one nonproductive cycle
            if len(remaining_q) == 4:
                print('Remaining switches:')
                pprint(remaining_switches)
        else:
            remaining_q = []
        remaining_q.append(remaining)
        time.sleep(increment)
    pool.close()
    pool.join()
|
lgpl-2.1
| 5,891,277,525,246,864,000
| 29.920635
| 104
| 0.563976
| false
| 3.832759
| false
| false
| false
|
springmerchant/pybbm
|
pybb/migrations/0003_auto_20150424_0918.py
|
1
|
1054
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from pybb.models import create_or_check_slug
def fill_slugs(apps, schema_editor):
    """Backfill the new slug column on every Category, Forum and Topic.

    Forum and Topic slugs are made unique within their parent (category
    resp. forum) via extra filters passed to create_or_check_slug.
    """
    Category = apps.get_model("pybb", "Category")
    Forum = apps.get_model("pybb", "Forum")
    Topic = apps.get_model("pybb", "Topic")

    def _assign_slug(instance, model, **extra_filters):
        # One row at a time: compute a unique slug, then persist it.
        instance.slug = create_or_check_slug(instance=instance, model=model,
                                             **extra_filters)
        instance.save()

    for category in Category.objects.all():
        _assign_slug(category, Category)
    for forum in Forum.objects.all():
        _assign_slug(forum, Forum, category=forum.category)
    for topic in Topic.objects.all():
        _assign_slug(topic, Topic, forum=topic.forum)
class Migration(migrations.Migration):
    """Data migration: populate the slug columns added in 0002."""

    dependencies = [
        ('pybb', '0002_auto_20150424_0918'),
    ]

    operations = [
        # Forward-only: no reverse function is given, so this migration
        # cannot be unapplied without a manual reverse.
        migrations.RunPython(fill_slugs),
    ]
|
bsd-2-clause
| -3,424,356,357,889,820,000
| 29.114286
| 87
| 0.647059
| false
| 3.536913
| false
| false
| false
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.