code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
|---|---|---|---|---|---|
import datetime
def json_datetime_decoder(data):
    """JSON object_hook that post-processes decoded values in place.

    - Falsy values ('' , None, [], 0) are normalized to None.
    - Keys containing 'datetime' are parsed from ISO-8601 'Z' strings
      into timezone-aware UTC datetimes.
    - The 'colors' key (a list of strings) is joined into one
      comma-separated string.

    :param data: dict produced by the JSON decoder (mutated and returned)
    :return: the same dict with converted values
    """
    for key, value in data.items():
        if not value:
            data[key] = None
            # Bug fix: previously execution fell through and tried to
            # strptime('')/join(None) below, raising for falsy values.
            continue
        if 'datetime' in key:
            data[key] = datetime.datetime.strptime(
                value, '%Y-%m-%dT%H:%M:%S.%fZ').replace(
                    tzinfo=datetime.timezone.utc)
        elif key == 'colors':
            data[key] = ','.join(value)
    return data
|
AstroMatt/esa-time-perception
|
backend/common/utils.py
|
Python
|
mit
| 378
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-21 08:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: alter ToucanInvitation.site to a nullable
    ForeignKey to sites.Site with CASCADE on delete."""

    dependencies = [
        ('invitations', '0002_auto_20161009_2158'),
    ]

    operations = [
        migrations.AlterField(
            model_name='toucaninvitation',
            name='site',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='sites.Site'),
        ),
    ]
|
mcallistersean/b2-issue-tracker
|
toucan/invitations/migrations/0003_auto_20161021_0859.py
|
Python
|
mit
| 555
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Tangible template object for the shared base backpack.

    Autogenerated (see file header); hand edits belong only between the
    BEGIN/END MODIFICATIONS markers.

    :param kernel: engine kernel (unused here; part of the template API)
    :return: configured Tangible instance
    """
    result = Tangible()

    result.template = "object/tangible/wearables/base/shared_base_backpack.iff"
    result.attribute_template_id = 11
    result.stfName("wearables_name","backpack_s01")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/wearables/base/shared_base_backpack.py
|
Python
|
mit
| 459
|
# coding: utf-8
class EmailConfirmationExpired(Exception):
    """Signals that an email confirmation is no longer valid (expired)."""
|
ademuk/django-email-confirm-la
|
email_confirm_la/exceptions.py
|
Python
|
mit
| 70
|
# encoding: utf-8
import argparse
import math
import os.path
import pickle
import re
import sys
import time
from nltk.translate import bleu_score
import numpy
import six
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.links as L
from chainer import reporter
from chainer import training
from chainer.training import extensions
import chainermn
import chainermn.functions
import chainermn.links
import europal
def cached_call(fname, func, *args):
    """Return ``func(*args)``, memoized on disk at ``fname`` via pickle.

    On a cache miss the value is computed, pickled to ``fname`` and
    returned; on a hit the pickled value is loaded instead of calling
    ``func`` again.
    """
    if not os.path.exists(fname):
        # Cache miss: compute, persist, then return.
        value = func(*args)
        with open(fname, 'wb') as fp:
            pickle.dump(value, fp)
        return value
    with open(fname, 'rb') as fp:
        return pickle.load(fp)
def read_source(in_dir, cache=None):
    """Load the English (source) vocabulary and dataset from ``in_dir``.

    The vocabulary reserves id 0 for '<eos>' and id 1 for '<unk>'.
    ``cache`` is accepted for signature compatibility with cached_call
    but is not used here.
    """
    path = os.path.join(in_dir, 'giga-fren.release2.fixed.en')
    vocab = ['<eos>', '<unk>'] + europal.count_words(path)
    dataset = europal.make_dataset(path, vocab)
    return vocab, dataset
def read_target(in_dir, cache=None):
    """Load the French (target) vocabulary and dataset from ``in_dir``.

    The vocabulary reserves id 0 for '<eos>' and id 1 for '<unk>'.
    ``cache`` is accepted for signature compatibility with cached_call
    but is not used here. (The parameter was previously misspelled
    'cahce'; renamed for consistency with read_source — every call site
    in this file passes it positionally.)
    """
    fr_path = os.path.join(in_dir, 'giga-fren.release2.fixed.fr')
    target_vocab = ['<eos>', '<unk>'] + europal.count_words(fr_path)
    target_data = europal.make_dataset(fr_path, target_vocab)
    return target_vocab, target_data
def sequence_embed(embed, xs):
    """Embed a batch of variable-length sequences in a single embed call.

    Concatenates the sequences, embeds them all at once, then splits the
    result back into per-sequence chunks at the cumulative-length
    boundaries.
    """
    boundaries = numpy.cumsum([len(seq) for seq in xs[:-1]])
    embedded = embed(F.concat(xs, axis=0))
    return F.split_axis(embedded, boundaries, 0, force_tuple=True)
class Encoder(chainer.Chain):
    """Encoder half of a model-parallel seq2seq network (runs on rank 0).

    The multi-node NStepLSTM is created with rank_out=1, so its final
    hidden states are sent to the Decoder process over the communicator
    instead of being consumed locally. ``n_target_vocab`` is unused here;
    it is kept for signature symmetry with Decoder.
    """

    def __init__(
            self, comm, n_layers, n_source_vocab, n_target_vocab, n_units):
        super(Encoder, self).__init__(
            embed_x=L.EmbedID(n_source_vocab, n_units),
            # Corresponding decoder LSTM will be invoked on process 1.
            mn_encoder=chainermn.links.create_multi_node_n_step_rnn(
                L.NStepLSTM(n_layers, n_units, n_units, 0.1),
                comm, rank_in=None, rank_out=1
            ),
        )
        self.comm = comm
        self.n_layers = n_layers
        self.n_units = n_units

    def __call__(self, *inputs):
        # Inputs carry sources then targets (see convert); the encoder
        # only consumes the source half.
        xs = inputs[:len(inputs) // 2]
        # Feed the source sequences reversed.
        xs = [x[::-1] for x in xs]

        exs = sequence_embed(self.embed_x, xs)

        # Encode input sequence and send hidden states to decoder.
        _, _, _, delegate_variable = self.mn_encoder(exs)

        # Last element represents delegate variable.
        return delegate_variable

    def translate(self, xs, max_length=100):
        """Encode ``xs`` and ship the hidden states to the decoder process.

        Always returns None: all decoding and evaluation happen on the
        decoder process (rank 1).
        """
        with chainer.no_backprop_mode():
            with chainer.using_config('train', False):
                xs = [x[::-1] for x in xs]
                exs = sequence_embed(self.embed_x, xs)

                # Encode input sequence and send hidden stats to decoder.
                self.mn_encoder(exs)

        # Encoder does not return anything.
        # All evaluation will be done in decoder process.
        return None
class Decoder(chainer.Chain):
    """Decoder half of a model-parallel seq2seq network (runs on rank 1).

    The multi-node NStepLSTM is created with rank_in=0, so it receives the
    encoder's hidden states from the process of rank 0 before decoding.
    Reports 'loss' and 'perp' through the chainer reporter.
    """

    def __init__(
            self, comm, n_layers, n_source_vocab, n_target_vocab, n_units):
        super(Decoder, self).__init__(
            embed_y=L.EmbedID(n_target_vocab, n_units),
            # Corresponding encoder LSTM will be invoked on process 0.
            mn_decoder=chainermn.links.create_multi_node_n_step_rnn(
                L.NStepLSTM(n_layers, n_units, n_units, 0.1),
                comm, rank_in=0, rank_out=None),
            W=L.Linear(n_units, n_target_vocab),
        )
        self.comm = comm
        self.n_layers = n_layers
        self.n_units = n_units

    def __call__(self, *inputs):
        # Inputs carry sources then targets (see convert).
        xs = inputs[:len(inputs) // 2]
        ys = inputs[len(inputs) // 2:]
        xs = [x[::-1] for x in xs]

        batch = len(xs)
        # Token id 0 is '<eos>' (vocabularies list it first).
        eos = self.xp.zeros(1, self.xp.int32)
        # Teacher forcing: inputs start with <eos>, labels end with <eos>.
        ys_in = [F.concat([eos, y], axis=0) for y in ys]
        ys_out = [F.concat([y, eos], axis=0) for y in ys]

        eys = sequence_embed(self.embed_y, ys_in)

        # Receive hidden states from encoder process and decode.
        _, _, os, _ = self.mn_decoder(eys)

        # It is faster to concatenate data before calculating loss
        # because only one matrix multiplication is called.
        concat_os = F.concat(os, axis=0)
        concat_ys_out = F.concat(ys_out, axis=0)
        loss = F.sum(F.softmax_cross_entropy(
            self.W(concat_os), concat_ys_out, reduce='no')) / batch
        reporter.report({'loss': loss.data}, self)
        n_words = concat_ys_out.shape[0]
        perp = self.xp.exp(loss.data * batch / n_words)
        reporter.report({'perp': perp}, self)
        return loss

    def translate(self, xs, max_length=100):
        """Greedily decode up to ``max_length`` tokens per sentence.

        ``xs`` is only used for its batch size; the actual source encoding
        arrives from the encoder process through mn_decoder. Returns a
        list of numpy arrays of token ids with trailing <eos> removed.
        """
        batch = len(xs)

        with chainer.no_backprop_mode():
            with chainer.using_config('train', False):
                result = []

                # First step: feed <eos> (id 0) for the whole batch.
                ys = self.xp.zeros(batch, self.xp.int32)
                eys = self.embed_y(ys)
                eys = chainer.functions.split_axis(
                    eys, batch, 0, force_tuple=True)

                # Receive hidden stats from encoder process.
                h, c, ys, _ = self.mn_decoder(eys)

                cys = chainer.functions.concat(ys, axis=0)
                wy = self.W(cys)
                ys = self.xp.argmax(wy.data, axis=1).astype(self.xp.int32)
                result.append(ys)

                # Recursively decode using the previously predicted token.
                for i in range(1, max_length):
                    eys = self.embed_y(ys)
                    eys = chainer.functions.split_axis(
                        eys, batch, 0, force_tuple=True)

                    # Non-MN RNN link can be accessed via `actual_rnn`.
                    h, c, ys = self.mn_decoder.actual_rnn(h, c, eys)
                    cys = chainer.functions.concat(ys, axis=0)
                    wy = self.W(cys)
                    ys = self.xp.argmax(wy.data, axis=1).astype(self.xp.int32)
                    result.append(ys)

                # (max_length, batch) -> (batch, max_length) on the host.
                result = cuda.to_cpu(self.xp.stack(result).T)

                # Remove EOS taggs
                outs = []
                for y in result:
                    inds = numpy.argwhere(y == 0)
                    if len(inds) > 0:
                        y = y[:inds[0, 0]]
                    outs.append(y)

                return outs
def convert(batch, device):
    """Converter for StandardUpdater: move a minibatch of (source, target)
    pairs to ``device`` and return sources followed by targets as a tuple.

    On GPU the per-sequence arrays are concatenated, transferred once, and
    split back — one host-to-device copy instead of one per sequence.
    """
    def _to_device(seqs):
        if device is None:
            return seqs
        if device < 0:
            # CPU target: move each sequence individually.
            return [chainer.dataset.to_device(device, seq) for seq in seqs]
        # GPU target: single bulk transfer, then split at the original
        # sequence boundaries.
        xp = cuda.cupy.get_array_module(*seqs)
        joined = xp.concatenate(seqs, axis=0)
        offsets = numpy.cumsum(
            [len(seq) for seq in seqs[:-1]], dtype=numpy.int32)
        moved = chainer.dataset.to_device(device, joined)
        return cuda.cupy.split(moved, offsets)

    sources = _to_device([x for x, _ in batch])
    targets = _to_device([y for _, y in batch])
    return tuple(sources + targets)
class BleuEvaluator(extensions.Evaluator):
    """Trainer extension that reports corpus BLEU over ``test_data``.

    Both ranks must call model.translate() for every batch so the MPI
    point-to-point sends/receives pair up: rank 0 (Encoder) only ships
    hidden states, rank 1 (Decoder) produces hypotheses and reports the
    'bleu' observation.
    """

    def __init__(self, model, test_data, device=-1, batch=100,
                 max_length=100, comm=None):
        # No iterator/target in the usual Evaluator sense; BLEU is
        # computed manually in evaluate().
        super(BleuEvaluator, self).__init__({'main': None}, model)
        self.model = model
        self.test_data = test_data
        self.batch = batch
        self.device = device
        self.max_length = max_length
        self.comm = comm

    def evaluate(self):
        bt = time.time()
        with chainer.no_backprop_mode():
            references = []
            hypotheses = []
            observation = {}
            with reporter.report_scope(observation):
                for i in range(0, len(self.test_data), self.batch):
                    src, trg = zip(*self.test_data[i:i + self.batch])
                    references.extend([[t.tolist()] for t in trg])

                    src = [chainer.dataset.to_device(self.device, x)
                           for x in src]

                    if self.comm.rank == 0:
                        # Encoder side: send hidden states, no output.
                        self.model.translate(src, self.max_length)

                    elif self.comm.rank == 1:
                        # Decoder side: collect greedy translations.
                        ys = [y.tolist()
                              for y in self.model.translate(
                                  src, self.max_length)]
                        hypotheses.extend(ys)

                if self.comm.rank == 1:
                    # Smoothed BLEU (method1) to avoid zero n-gram counts.
                    bleu = bleu_score.corpus_bleu(
                        references, hypotheses, smoothing_function=bleu_score.
                        SmoothingFunction().method1)
                    reporter.report({'bleu': bleu}, self.model)
        et = time.time()

        if self.comm.rank == 1:
            print('BleuEvaluator(single)::evaluate(): '
                  'took {:.3f} [s]'.format(et - bt))
            sys.stdout.flush()
        return observation
def create_optimizer(opt_arg):
    """Parse an optimizer specification string and instantiate it.

    The syntax is::

        opt(params...)

    where ``opt`` is any key of ``names_dict`` below (case-insensitive,
    e.g. sgd, adam, rmsprop, ...) and each param is either a positional
    float or ``key=val`` with a float value.

    :raises RuntimeError: if the string does not match ``name(args)`` or
        names an unknown optimizer.
    """
    # Accept any identifier here; validity is checked against names_dict
    # below. (The previous pattern hard-coded 'adam|sgd' even though the
    # table supports more optimizers, and a non-matching spec crashed
    # with AttributeError on m.group.)
    m = re.match(r'(\w+)\(([^)]*)\)', opt_arg, re.I)
    if m is None:
        raise RuntimeError(
            'Invalid optimizer specification: \'{}\''.format(opt_arg))
    name = m.group(1).lower()
    args = m.group(2)

    names_dict = {
        'adadelta': chainer.optimizers.AdaDelta,
        'adagrad': chainer.optimizers.AdaGrad,
        'adam': chainer.optimizers.Adam,
        'momentumsgd': chainer.optimizers.MomentumSGD,
        'nesterovag': chainer.optimizers.NesterovAG,
        'rmsprop': chainer.optimizers.RMSprop,
        'rmspropgraves': chainer.optimizers.RMSpropGraves,
        'sgd': chainer.optimizers.SGD,
        'smorms3': chainer.optimizers.SMORMS3,
    }

    try:
        opt = names_dict[name]
    except KeyError:
        raise RuntimeError('Unknown optimizer: \'{}\' in \'{}\''.format(
            name, opt_arg))

    # positional arguments
    pos = []
    # keyword arguments
    kw = {}

    args = args.strip()
    if args:
        for a in re.split(r',\s*', args):
            if '=' in a:
                key, val = a.split('=')
                kw[key] = float(val)
            else:
                pos.append(float(a))

    return opt(*pos, **kw)
def _get_num_split(excp):
"""Get the preferrable number of split from a DataSizeError error"""
ps = excp.pickled_size
mx = excp.max_size
return (ps + mx - 1) // mx
def _slices(excp):
"""Get a list of slices that are expected to fit in a single send/recv."""
ds = excp.dataset_size
nsplit = _get_num_split(excp)
size = math.ceil(ds / nsplit)
return [(b, min(e, ds)) for b, e in
((i * size, (i + 1) * size) for i in range(0, nsplit))]
def main():
    """Entry point: train a 2-process model-parallel seq2seq model.

    Rank 0 runs the Encoder, rank 1 the Decoder; hidden states flow
    between them through chainermn's multi-node NStepLSTM links.
    """
    parser = argparse.ArgumentParser(description='Chainer example: seq2seq')
    parser.add_argument('--batchsize', '-b', type=int, default=64,
                        help='Number of images in each mini-batch')
    parser.add_argument('--bleu', action='store_true', default=False,
                        help='Report BLEU score')
    parser.add_argument('--gpu', '-g', action='store_true',
                        help='Use GPU')
    parser.add_argument('--cache', '-c', default=None,
                        help='Directory to cache pre-processed dataset')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit', '-u', type=int, default=1024,
                        help='Number of units')
    parser.add_argument('--communicator', default='hierarchical',
                        help='Type of communicator')
    parser.add_argument('--stop', '-s', type=str, default='15e',
                        help='Stop trigger (ex. "500i", "15e")')
    parser.add_argument('--input', '-i', type=str, default='wmt',
                        help='Input directory')
    parser.add_argument('--optimizer', type=str, default='adam()',
                        help='Optimizer and its argument')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    args = parser.parse_args()

    # Prepare ChainerMN communicator
    # NOTE(review): args.communicator is parsed above but the communicator
    # type is hard-coded here ('hierarchical' / 'naive') — confirm intended.
    if args.gpu:
        comm = chainermn.create_communicator('hierarchical')
        dev = comm.intra_rank
    else:
        comm = chainermn.create_communicator('naive')
        dev = -1

    # Encoder lives on rank 0, Decoder on rank 1; nothing else fits.
    if comm.size != 2:
        raise ValueError(
            'This example can only be executed on exactly 2 processes.')

    if comm.rank == 0:
        print('==========================================')
        print('Num process (COMM_WORLD): {}'.format(comm.size))
        if args.gpu:
            print('Using GPUs')
        print('Using {} communicator'.format(args.communicator))
        print('Num unit: {}'.format(args.unit))
        print('Num Minibatch-size: {}'.format(args.batchsize))
        print('==========================================')

    # Both processes prepare datasets.
    # (With comm.size == 2 this condition is always true; both ranks need
    # the data so the SerialIterators stay in lockstep.)
    if comm.rank == 0 or comm.rank == 1:
        if args.cache and not os.path.exists(args.cache):
            os.mkdir(args.cache)

        # Read source data
        bt = time.time()
        if args.cache:
            cache_file = os.path.join(args.cache, 'source.pickle')
            source_vocab, source_data = cached_call(cache_file,
                                                    read_source,
                                                    args.input, args.cache)
        else:
            source_vocab, source_data = read_source(args.input, args.cache)
        et = time.time()
        print('RD source done. {:.3f} [s]'.format(et - bt))
        sys.stdout.flush()

        # Read target data
        bt = time.time()
        if args.cache:
            cache_file = os.path.join(args.cache, 'target.pickle')
            target_vocab, target_data = cached_call(cache_file,
                                                    read_target,
                                                    args.input, args.cache)
        else:
            target_vocab, target_data = read_target(args.input, args.cache)
        et = time.time()
        print('RD target done. {:.3f} [s]'.format(et - bt))
        sys.stdout.flush()

        print('Original training data size: %d' % len(source_data))
        # Keep only pairs where both sides are non-empty and < 50 tokens.
        train_data = [(s, t)
                      for s, t in six.moves.zip(source_data, target_data)
                      if 0 < len(s) < 50 and 0 < len(t) < 50]
        print('Filtered training data size: %d' % len(train_data))

        # Development (test) set: newstest2013, encoded with the training
        # vocabularies.
        en_path = os.path.join(args.input, 'dev', 'newstest2013.en')
        source_data = europal.make_dataset(en_path, source_vocab)
        fr_path = os.path.join(args.input, 'dev', 'newstest2013.fr')
        target_data = europal.make_dataset(fr_path, target_vocab)
        assert(len(source_data) == len(target_data))
        test_data = [(s, t) for s, t
                     in six.moves.zip(source_data, target_data)
                     if 0 < len(s) and 0 < len(t)]

        source_ids = {word: index
                      for index, word in enumerate(source_vocab)}
        target_ids = {word: index
                      for index, word in enumerate(target_vocab)}
    else:
        # (unreachable with exactly 2 processes; kept for safety)
        train_data, test_data = None, None
        target_ids, source_ids = None, None

    # Print GPU id
    for i in range(0, comm.size):
        if comm.rank == i:
            print('Rank {} GPU: {}'.format(comm.rank, dev))
        sys.stdout.flush()
        comm.mpi_comm.Barrier()

    # broadcast id -> word dictionary
    source_ids = comm.bcast_obj(source_ids, root=0)
    target_ids = comm.bcast_obj(target_ids, root=0)

    # Inverse mappings (id -> word) for pretty-printing translations.
    target_words = {i: w for w, i in target_ids.items()}
    source_words = {i: w for w, i in source_ids.items()}

    if comm.rank == 0:
        print('target_words : {}'.format(len(target_words)))
        print('source_words : {}'.format(len(source_words)))

    n_lstm_layers = 3
    # Model parallelism: each rank holds only its half of the network.
    if comm.rank == 0:
        model = Encoder(
            comm, n_lstm_layers, len(source_ids), len(target_ids), args.unit)
    elif comm.rank == 1:
        model = Decoder(
            comm, n_lstm_layers, len(source_ids), len(target_ids), args.unit)

    if dev >= 0:
        chainer.cuda.get_device_from_id(dev).use()
        model.to_gpu(dev)

    # determine the stop trigger: "<N>e" -> epochs, "<N>i" -> iterations
    m = re.match(r'^(\d+)e$', args.stop)
    if m:
        trigger = (int(m.group(1)), 'epoch')
    else:
        m = re.match(r'^(\d+)i$', args.stop)
        if m:
            trigger = (int(m.group(1)), 'iteration')
        else:
            if comm.rank == 0:
                sys.stderr.write('Error: unknown stop trigger: {}'.format(
                    args.stop))
            exit(-1)

    if comm.rank == 0:
        print('Trigger: {}'.format(trigger))

    optimizer = create_optimizer(args.optimizer)
    optimizer.setup(model)

    # shuffle=False keeps both ranks iterating the data in the same order,
    # which the paired multi-node RNN links require.
    train_iter = chainer.iterators.SerialIterator(train_data,
                                                  args.batchsize,
                                                  shuffle=False)
    updater = training.StandardUpdater(
        train_iter, optimizer, converter=convert, device=dev)
    trainer = training.Trainer(updater,
                               trigger,
                               out=args.out)

    # Do not use multi node evaluator.
    # (because evaluation is done only on decoder process)
    trainer.extend(BleuEvaluator(model, test_data, device=dev, comm=comm))

    def translate_one(source, target):
        # Translate a single raw sentence and print source/result/expected.
        words = europal.split_sentence(source)
        print('# source : ' + ' '.join(words))
        # Unknown words map to id 1 ('<unk>').
        x = model.xp.array(
            [source_ids.get(w, 1) for w in words], model.xp.int32)
        ys = model.translate([x])[0]
        words = [target_words[y] for y in ys]
        print('# result : ' + ' '.join(words))
        print('# expect : ' + target)

    # NOTE(review): this helper is defined but never registered as a
    # trainer extension below — confirm whether it was disabled on purpose.
    def translate(trainer):
        translate_one(
            'Who are we ?',
            'Qui sommes-nous?')
        translate_one(
            'And it often costs over a hundred dollars ' +
            'to obtain the required identity card .',
            'Or, il en coûte souvent plus de cent dollars ' +
            'pour obtenir la carte d\'identité requise.')

        source, target = test_data[numpy.random.choice(len(test_data))]
        source = ' '.join([source_words.get(i, '') for i in source])
        target = ' '.join([target_words.get(i, '') for i in target])
        translate_one(source, target)

    # Reporting extensions live on the decoder rank, where loss/perp/bleu
    # are actually observed.
    if comm.rank == 1:
        trigger = (1, 'epoch')
        trainer.extend(extensions.LogReport(trigger=trigger),
                       trigger=trigger)
        report = extensions.PrintReport(['epoch',
                                         'iteration',
                                         'main/loss',
                                         'main/perp',
                                         'validation/main/bleu',
                                         'elapsed_time'])
        trainer.extend(report, trigger=trigger)
        trainer.extend(extensions.ProgressBar(update_interval=1))

    comm.mpi_comm.Barrier()
    if comm.rank == 0:
        print('start training')
        sys.stdout.flush()

    trainer.run()
|
keisuke-umezawa/chainer
|
examples/chainermn/seq2seq/seq2seq_mp1.py
|
Python
|
mit
| 19,044
|
import yaml
from monitors import MonitorType
from actions import ActionType
from threading import Timer, Lock
import rospy
class Watchdog(object):
    """One monitor/action pair loaded from a YAML config dict.

    Monitors trigger execute(); when armed, all configured actions run
    and, if restart_timeout >= 0, the watchdog re-arms itself after that
    many seconds via a threading.Timer.
    """

    # Required top-level keys of each watchdog config entry.
    yaml_keys = ['name', 'description', 'restart_timeout', 'monitors', 'actions']

    def __init__(self, config):
        """:param config: dict with the keys listed in yaml_keys."""
        if not isinstance(config, dict):
            raise Exception("Watchdog class must be created with dictionary "
                            "as parameter.")
        for key in self.yaml_keys:
            if key not in config:
                raise Exception("YAML config error: monitor/action pair "
                                "missing '{}' field".format(key))
        self._name = config['name']
        self._restart_timeout = config['restart_timeout']
        self._restart_timer = None
        self._monitors = []
        self._executing = Lock()
        # Bug fix: arm-flag must exist before run() is called, otherwise
        # execute() raised AttributeError when a monitor fired early.
        self._executable = False
        for monitor in config['monitors']:
            self._monitors.append(MonitorType.create(monitor, self.execute))
        self._actions = []
        for action in config['actions']:
            self._actions.append(ActionType.create(action))

    def _start_monitors(self):
        # Start every configured monitor.
        for monitor in self._monitors:
            monitor.start()

    def _stop_monitors(self, restart_after_timeout=False):
        # Stop every configured monitor.
        # NOTE(review): restart_after_timeout is accepted but unused —
        # confirm whether callers rely on the parameter existing.
        for monitor in self._monitors:
            monitor.stop()

    def _check(self):
        # True if any monitor currently reports a failure.
        for monitor in self._monitors:
            if monitor.check():
                return True
        return False

    def _execute(self):
        # Fire all actions, then optionally schedule a re-arm.
        rospy.logwarn("Executing '{}' watchdog.".format(self._name))
        self.shutdown()
        for action in self._actions:
            action.execute()
        if self._restart_timeout > -1:
            self._restart_timer = Timer(self._restart_timeout,
                                        self.run)
            self._restart_timer.start()

    def _pause(self):
        # NOTE(review): self._check_timer is never assigned anywhere in
        # this class, so calling _pause() raises AttributeError. Looks
        # like leftover from an earlier timer-based design — confirm and
        # remove or repoint at _restart_timer.
        self._check_timer.cancel()

    def run(self):
        """Arm the watchdog and start its monitors."""
        rospy.loginfo("Starting '{}' watchdog".format(self._name))
        self._executable = True
        self._start_monitors()

    def shutdown(self):
        """Disarm the watchdog, stop monitors and cancel a pending re-arm."""
        rospy.loginfo("Stopping '{}' watchdog".format(self._name))
        self._executable = False
        self._stop_monitors()
        if self._restart_timer is not None:
            self._restart_timer.cancel()

    def execute(self):
        """Monitor callback: run the actions exactly once per arming."""
        with self._executing:
            if self._executable:
                # Disarm first so concurrent callbacks cannot re-enter.
                self._executable = False
                self._execute()
|
bfalacerda/strands_apps
|
watchdog_node/src/watchdog_node/watchdog.py
|
Python
|
mit
| 2,497
|
import numpy
import copy
class HomogeneousData():
    """Minibatch iterator that groups captions of equal token length.

    ``data`` is a tuple (caps, feats, feats2): captions (strings) plus two
    parallel feature lists. Each call to next() returns one batch in which
    every caption has the same number of words; batches cycle through the
    available lengths in a random order until all examples are consumed,
    then reset() reshuffles and StopIteration is raised.
    """

    def __init__(self, data, batch_size=128, maxlen=None):
        # (Removed a dead `self.batch_size = 128` assignment that was
        # immediately overwritten by the argument below.)
        self.data = data
        self.batch_size = batch_size
        self.maxlen = maxlen

        self.prepare()
        self.reset()

    def prepare(self):
        """Index the captions by token length."""
        self.caps = self.data[0]
        self.feats = self.data[1]
        self.feats2 = self.data[2]

        # find the unique lengths
        self.lengths = [len(cc.split()) for cc in self.caps]
        self.len_unique = numpy.unique(self.lengths)
        # remove any overly long sentences
        if self.maxlen:
            self.len_unique = [ll for ll in self.len_unique if ll <= self.maxlen]

        # indices of unique lengths
        self.len_indices = dict()
        self.len_counts = dict()
        for ll in self.len_unique:
            # ll is a numpy scalar, so the comparison broadcasts over the
            # plain Python list self.lengths.
            self.len_indices[ll] = numpy.where(self.lengths == ll)[0]
            self.len_counts[ll] = len(self.len_indices[ll])

        # current counter
        self.len_curr_counts = copy.copy(self.len_counts)

    def reset(self):
        """Restart an epoch: reshuffle lengths and per-length indices."""
        self.len_curr_counts = copy.copy(self.len_counts)
        self.len_unique = numpy.random.permutation(self.len_unique)
        self.len_indices_pos = dict()
        for ll in self.len_unique:
            self.len_indices_pos[ll] = 0
            self.len_indices[ll] = numpy.random.permutation(self.len_indices[ll])
        self.len_idx = -1

    def next(self):
        """Return the next (caps, feats, feats2) batch of uniform length."""
        count = 0
        # Advance cyclically to the next length with remaining examples.
        while True:
            self.len_idx = numpy.mod(self.len_idx+1, len(self.len_unique))
            if self.len_curr_counts[self.len_unique[self.len_idx]] > 0:
                break
            count += 1
            if count >= len(self.len_unique):
                break
        if count >= len(self.len_unique):
            # Epoch exhausted: reshuffle for the next pass and stop.
            self.reset()
            raise StopIteration()

        # get the batch size
        curr_batch_size = numpy.minimum(self.batch_size, self.len_curr_counts[self.len_unique[self.len_idx]])
        curr_pos = self.len_indices_pos[self.len_unique[self.len_idx]]
        # get the indices for the current batch
        curr_indices = self.len_indices[self.len_unique[self.len_idx]][curr_pos:curr_pos+curr_batch_size]
        self.len_indices_pos[self.len_unique[self.len_idx]] += curr_batch_size
        self.len_curr_counts[self.len_unique[self.len_idx]] -= curr_batch_size

        # 'feats' corresponds to the after and before sentences
        caps = [self.caps[ii] for ii in curr_indices]
        feats = [self.feats[ii] for ii in curr_indices]
        feats2 = [self.feats2[ii] for ii in curr_indices]

        return caps, feats, feats2

    def __iter__(self):
        return self
def prepare_data(seqs_x, seqs_y, seqs_z, worddict, maxlen=None, n_words=20000):
    """
    Put the data into format useable by the model.

    Each of seqs_x/seqs_y/seqs_z is a list of sentences (strings); they
    are tokenized on whitespace and mapped through ``worddict``. Ids at
    or above ``n_words`` are replaced by 1 (the <unk> id). When ``maxlen``
    is given, triplets where any member reaches maxlen tokens are dropped.

    :return: (x, x_mask, y, y_mask, z, z_mask) time-major arrays — int64
        id matrices of shape (max_len + 1, n_samples) and float32 masks
        covering each sequence plus one trailing padding step — or six
        Nones if filtering leaves no data.
    :raises KeyError: for words missing from ``worddict`` (unchanged
        behavior).
    """
    def _encode(sentences):
        # Map words to ids, clamping out-of-vocabulary ids to 1 (<unk>).
        return [[worddict[w] if worddict[w] < n_words else 1
                 for w in cc.split()] for cc in sentences]

    seqs_x = _encode(seqs_x)
    seqs_y = _encode(seqs_y)
    seqs_z = _encode(seqs_z)

    lengths_x = [len(s) for s in seqs_x]
    lengths_y = [len(s) for s in seqs_y]
    lengths_z = [len(s) for s in seqs_z]

    if maxlen is not None:
        new_seqs_x = []
        new_seqs_y = []
        new_seqs_z = []
        new_lengths_x = []
        new_lengths_y = []
        new_lengths_z = []
        for l_x, s_x, l_y, s_y, l_z, s_z in zip(lengths_x, seqs_x, lengths_y, seqs_y, lengths_z, seqs_z):
            if l_x < maxlen and l_y < maxlen and l_z < maxlen:
                new_seqs_x.append(s_x)
                new_lengths_x.append(l_x)
                new_seqs_y.append(s_y)
                new_lengths_y.append(l_y)
                new_seqs_z.append(s_z)
                new_lengths_z.append(l_z)
        lengths_x = new_lengths_x
        seqs_x = new_seqs_x
        lengths_y = new_lengths_y
        seqs_y = new_seqs_y
        lengths_z = new_lengths_z
        seqs_z = new_seqs_z

        if len(lengths_x) < 1 or len(lengths_y) < 1 or len(lengths_z) < 1:
            return None, None, None, None, None, None

    n_samples = len(seqs_x)
    # +1 leaves one trailing zero (padding/EOS) column per sequence.
    maxlen_x = numpy.max(lengths_x) + 1
    maxlen_y = numpy.max(lengths_y) + 1
    maxlen_z = numpy.max(lengths_z) + 1

    x = numpy.zeros((maxlen_x, n_samples)).astype('int64')
    y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
    z = numpy.zeros((maxlen_z, n_samples)).astype('int64')
    x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
    y_mask = numpy.zeros((maxlen_y, n_samples)).astype('float32')
    z_mask = numpy.zeros((maxlen_z, n_samples)).astype('float32')
    for idx, [s_x, s_y, s_z] in enumerate(zip(seqs_x,seqs_y,seqs_z)):
        x[:lengths_x[idx],idx] = s_x
        # Mask covers the sequence plus the trailing padding step.
        x_mask[:lengths_x[idx]+1,idx] = 1.
        y[:lengths_y[idx],idx] = s_y
        y_mask[:lengths_y[idx]+1,idx] = 1.
        z[:lengths_z[idx],idx] = s_z
        z_mask[:lengths_z[idx]+1,idx] = 1.

    return x, x_mask, y, y_mask, z, z_mask
def grouper(text):
    """
    Group text into triplets.

    Returns (source, forward, backward): the sequence without its first
    and last elements, the sequence shifted one ahead, and the sequence
    shifted one behind, so source[i] is flanked by backward-context
    backward[i] and forward-context forward[i].
    """
    return (text[1:-1], text[2:], text[:-2])
|
btjhjeon/ConversationalQA
|
skipthoughts/training/homogeneous_data.py
|
Python
|
mit
| 5,360
|
import unittest
import joerd.output.skadi as skadi
class TestTileName(unittest.TestCase):
    """Round-trip check: skadi._parse_tile must invert skadi._tile_name."""

    def test_tile_name_parsing(self):
        # Exhaustively cover every 1-degree tile coordinate:
        # x in [0, 360), y in [0, 180).
        for x in range(0, 360):
            for y in range(0, 180):
                tile_name = skadi._tile_name(x, y)
                self.assertEqual((x, y), skadi._parse_tile(tile_name))
|
tilezen/joerd
|
tests/test_skadi.py
|
Python
|
mit
| 321
|
import json
import sqlite3
class TermsAndConditionsDB(object):
    """Sqlite-backed store of per-user terms-and-conditions acceptances.

    Single table terms_conditions_accepts(user_urn PRIMARY KEY,
    accept_json, until_date). A fresh connection is opened and closed
    around every operation; `with self.con` commits each transaction.
    """

    # Default database file, used when no path is passed to __init__.
    db_file = 'terms_conditions.db'

    def __init__(self, db_file=None):
        """Create the table if needed.

        :param db_file: optional database path; defaults to the
            class-level db_file (keeps the original no-arg behavior).
        """
        if db_file is not None:
            self.db_file = db_file
        self.con = sqlite3.connect(self.db_file)
        with self.con:
            cursor = self.con.cursor()
            cursor.execute('''CREATE TABLE IF NOT EXISTS terms_conditions_accepts(user_urn TEXT PRIMARY KEY, accept_json TEXT, until_date TEXT)''')
        self.con.close()

    def find_user_accepts(self, user_urn):
        """
        :return: a pair (until_date, accepts dict mapping str -> bool),
            or None when the user has no stored acceptance
        """
        self.con = sqlite3.connect(self.db_file)
        try:
            with self.con:
                cursor = self.con.cursor()
                cursor.execute('''SELECT accept_json, until_date FROM terms_conditions_accepts WHERE user_urn=?''', (user_urn,))
                for row in cursor:
                    # row[0] = accept_json, row[1] = until_date
                    return (row[1], json.loads(row[0]))
                return None
        finally:
            self.con.close()

    def register_user_accepts(self, user_urn, accepts, until):
        """
        :param user_urn: user urn (str)
        :type user_urn: str
        :param accepts: a dict which lists what the user has accepted (str -> bool)
        :type accepts: dict[str, bool]
        :param until: RFC3339 formatted date until which the accepts are valid
        :type until: str
        """
        self.con = sqlite3.connect(self.db_file)
        try:
            with self.con:
                cursor = self.con.cursor()
                # Upsert: one row per user.
                cursor.execute('''INSERT OR REPLACE INTO terms_conditions_accepts (user_urn, accept_json, until_date)
                                       VALUES (?, ?, ?)''', (user_urn, json.dumps(accepts), until))
                return
        finally:
            self.con.close()

    def delete_user_accepts(self, user_urn):
        """
        :param user_urn: user urn (str)
        :type user_urn: str
        """
        self.con = sqlite3.connect(self.db_file)
        try:
            with self.con:
                cursor = self.con.cursor()
                cursor.execute('''DELETE FROM terms_conditions_accepts WHERE user_urn=?''', (user_urn,))
                return
        finally:
            self.con.close()
|
open-multinet/docker-am
|
gcf_docker_plugin/terms_conditions/terms_conditions.py
|
Python
|
mit
| 2,364
|
from collections import OrderedDict
import re
import os
from xml.etree import ElementTree as ET
import openmc
import openmc.checkvalue as cv
from openmc.data import NATURAL_ABUNDANCE, atomic_mass
class Element(str):
    """A natural element that auto-expands to add the isotopes of an element to
    a material in their natural abundance. Internally, the OpenMC Python API
    expands the natural element into isotopes only when the materials.xml file
    is created.

    Parameters
    ----------
    name : str
        Chemical symbol of the element, e.g. Pu

    Attributes
    ----------
    name : str
        Chemical symbol of the element, e.g. Pu

    """

    def __new__(cls, name):
        # Validate: a chemical symbol is a 1- or 2-character string.
        cv.check_type('element name', name, str)
        cv.check_length('element name', name, 1, 2)
        return super().__new__(cls, name)

    @property
    def name(self):
        # The element IS its symbol (subclass of str).
        return self

    def expand(self, percent, percent_type, enrichment=None,
               cross_sections=None):
        """Expand natural element into its naturally-occurring isotopes.

        An optional cross_sections argument or the OPENMC_CROSS_SECTIONS
        environment variable is used to specify a cross_sections.xml file.
        If the cross_sections.xml file is found, the element is expanded only
        into the isotopes/nuclides present in cross_sections.xml. If no
        cross_sections.xml file is found, the element is expanded based on its
        naturally occurring isotopes.

        Parameters
        ----------
        percent : float
            Atom or weight percent
        percent_type : {'ao', 'wo'}
            'ao' for atom percent and 'wo' for weight percent
        enrichment : float, optional
            Enrichment for U235 in weight percent. For example, input 4.95 for
            4.95 weight percent enriched U. Default is None
            (natural composition).
        cross_sections : str, optional
            Location of cross_sections.xml file. Default is None.

        Returns
        -------
        isotopes : list
            Naturally-occurring isotopes of the element. Each item of the list
            is a tuple consisting of a nuclide string, the atom/weight percent,
            and the string 'ao' or 'wo'.

        Notes
        -----
        When the `enrichment` argument is specified, a correlation from
        `ORNL/CSD/TM-244 <https://doi.org/10.2172/5561567>`_ is used to
        calculate the weight fractions of U234, U235, U236, and U238. Namely,
        the weight fraction of U234 and U236 are taken to be 0.89% and 0.46%,
        respectively, of the U235 weight fraction. The remainder of the isotopic
        weight is assigned to U238.

        """
        # Get the nuclides present in nature
        natural_nuclides = set()
        for nuclide in sorted(NATURAL_ABUNDANCE.keys()):
            # Match nuclides whose name is this symbol followed by a mass
            # number (self is the symbol string).
            if re.match(r'{}\d+'.format(self), nuclide):
                natural_nuclides.add(nuclide)

        # Create dict to store the expanded nuclides and abundances
        abundances = OrderedDict()

        # If cross_sections is None, get the cross sections from the
        # OPENMC_CROSS_SECTIONS environment variable
        if cross_sections is None:
            cross_sections = os.environ.get('OPENMC_CROSS_SECTIONS')

        # If a cross_sections library is present, check natural nuclides
        # against the nuclides in the library
        if cross_sections is not None:
            library_nuclides = set()
            tree = ET.parse(cross_sections)
            root = tree.getroot()
            for child in root:
                # NOTE(review): reads the 'materials' attribute of each
                # library entry as the nuclide name — confirm against the
                # cross_sections.xml schema in use.
                nuclide = child.attrib['materials']
                if re.match(r'{}\d+'.format(self), nuclide) and \
                   '_m' not in nuclide:
                    library_nuclides.add(nuclide)

            # Get a set of the mutual and absent nuclides. Convert to lists
            # and sort to avoid different ordering between Python 2 and 3.
            mutual_nuclides = natural_nuclides.intersection(library_nuclides)
            absent_nuclides = natural_nuclides.difference(mutual_nuclides)
            mutual_nuclides = sorted(list(mutual_nuclides))
            absent_nuclides = sorted(list(absent_nuclides))

            # If all natural nuclides are present in the library, expand element
            # using all natural nuclides
            if len(absent_nuclides) == 0:
                for nuclide in mutual_nuclides:
                    abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]

            # If no natural elements are present in the library, check if the
            # 0 nuclide is present. If so, set the abundance to 1 for this
            # nuclide. Else, raise an error.
            elif len(mutual_nuclides) == 0:
                nuclide_0 = self + '0'
                if nuclide_0 in library_nuclides:
                    abundances[nuclide_0] = 1.0
                else:
                    msg = 'Unable to expand element {0} because the cross '\
                          'section library provided does not contain any of '\
                          'the natural isotopes for that element.'\
                          .format(self)
                    raise ValueError(msg)

            # If some, but not all, natural nuclides are in the library, add
            # the mutual nuclides. For the absent nuclides, add them based on
            # our knowledge of the common cross section libraries
            # (ENDF, JEFF, and JENDL)
            else:
                # Add the mutual isotopes
                for nuclide in mutual_nuclides:
                    abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]

                # Adjust the abundances for the absent nuclides
                for nuclide in absent_nuclides:
                    if nuclide in ['O17', 'O18'] and 'O16' in mutual_nuclides:
                        abundances['O16'] += NATURAL_ABUNDANCE[nuclide]
                    elif nuclide == 'Ta180' and 'Ta181' in mutual_nuclides:
                        abundances['Ta181'] += NATURAL_ABUNDANCE[nuclide]
                    elif nuclide == 'W180' and 'W182' in mutual_nuclides:
                        abundances['W182'] += NATURAL_ABUNDANCE[nuclide]
                    else:
                        msg = 'Unsure how to partition natural abundance of ' \
                              'isotope {0} into other natural isotopes of ' \
                              'this element that are present in the cross ' \
                              'section library provided. Consider adding ' \
                              'the isotopes of this element individually.'
                        raise ValueError(msg)

        # If a cross_section library is not present, expand the element into
        # its natural nuclides
        else:
            for nuclide in natural_nuclides:
                abundances[nuclide] = NATURAL_ABUNDANCE[nuclide]

        # Modify mole fractions if enrichment provided
        if enrichment is not None:
            # Calculate the mass fractions of isotopes
            # (ORNL/CSD/TM-244 correlation; see Notes above.)
            abundances['U234'] = 0.0089 * enrichment
            abundances['U235'] = enrichment
            abundances['U236'] = 0.0046 * enrichment
            abundances['U238'] = 100.0 - 1.0135 * enrichment

            # Convert the mass fractions to mole fractions
            for nuclide in abundances.keys():
                abundances[nuclide] /= atomic_mass(nuclide)

            # Normalize the mole fractions to one
            sum_abundances = sum(abundances.values())
            for nuclide in abundances.keys():
                abundances[nuclide] /= sum_abundances

        # Compute the ratio of the nuclide atomic masses to the element
        # atomic mass
        if percent_type == 'wo':

            # Compute the element atomic mass
            element_am = 0.
            for nuclide in abundances.keys():
                element_am += atomic_mass(nuclide) * abundances[nuclide]

            # Convert the molar fractions to mass fractions
            for nuclide in abundances.keys():
                abundances[nuclide] *= atomic_mass(nuclide) / element_am

            # Normalize the mass fractions to one
            sum_abundances = sum(abundances.values())
            for nuclide in abundances.keys():
                abundances[nuclide] /= sum_abundances

        # Create a list of the isotopes in this element
        isotopes = []
        for nuclide, abundance in abundances.items():
            isotopes.append((nuclide, percent * abundance, percent_type))

        return isotopes
|
johnnyliu27/openmc
|
openmc/element.py
|
Python
|
mit
| 8,571
|
import sys
import re
import numpy
from matplotlib import pyplot
# Choose correct html parser library for the running interpreter:
# Python 2 ships HTMLParser, Python 3 ships html.parser.
version = sys.version_info.major
if version != 2:
    from html.parser import HTMLParser
else:
    from HTMLParser import HTMLParser
### Initialize Data Structures ###
# Set Month Data Array and Name values.
month = [0] * 12
month_names = [
"Jan",
"Feb",
"March",
"April",
"May",
"June",
"July",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec"
]
# Set Day Data Array and Name values.
day = [0] * 7
day_names = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday"
]
# Set Time Data Array and Name values.
time = [0] * 24
time_names = []
for name in range(24):
if name < 10:
time_names.append("0" + str(name) + ":")
else:
time_names.append(str(name) + ":")
for i in range(len(time_names)):
time_names[i] = time_names[i][:-1]
### Setup and Activate Parser/Data Gatherer ###
# Parse Data while adding to data arrays with regex.
class MessagesParser(HTMLParser):
    """HTML parser that tallies Messenger timestamps into the module-level
    month/day/time counter lists."""

    # Matches e.g. "Saturday, 01 March 2014 at 13:37".
    _STAMP_PATTERN = ("(Saturday|Sunday|Monday|Tuesday|Wednesday|Thursday|Friday)(,) " \
                      "[0-9]{2} (January|February|March|April|May|June|" \
                      "July|August|September|October|November|December) " \
                      "[0-9]{4} at [0-9]{2}:[0-9]{2}")

    def handle_data(self, data):
        found = re.search(self._STAMP_PATTERN, data)
        if not found:
            return
        stamp = found.group(0)
        # NOTE: plain substring searches — short labels ("Jan") also hit
        # their long forms ("January"), which is what makes months match.
        for idx, label in enumerate(month_names):
            if re.search(label, stamp):
                month[idx] = month[idx] + 1
        for idx, label in enumerate(day_names):
            if re.search(label, stamp):
                day[idx] = day[idx] + 1
        for idx, label in enumerate(time_names):
            if re.search(label, stamp):
                time[idx] = time[idx] + 1
# Parse the exported Messenger HTML, closing the file when done
# (the original left the handle open).
parser = MessagesParser()
with open("html/messages.htm", 'r') as file:
    messages = file.readlines()
for i in range(len(messages)):
    parser.feed(messages[i])
### Create Graphs ###

# Bar graph creator.
def messages(n_groups, bar_width, x_lim, items, names,
             x_label, y_label, title, file_name):
    """Plot *items* as a bar chart labelled by *names* and save to file_name.

    n_groups must equal len(items) and len(names); x_lim pads the x-axis
    beyond the first and last bar; bar_width sets each bar's width.
    """
    # Plot graph.
    fig, _ = pyplot.subplots()
    index = numpy.arange(n_groups)
    pyplot.bar(index, items, bar_width)
    # Add texts and labels.
    pyplot.xlabel(x_label)
    pyplot.ylabel(y_label)
    pyplot.title(title)
    # Shape graph: centre tick labels under the bars, pad the x-range.
    pyplot.xticks(index + bar_width/2, names)
    pyplot.xlim([min(index) - x_lim, max(index) + x_lim])
    pyplot.tight_layout()
    # Export graph, then close the figure so repeated calls do not
    # accumulate open figures (the original leaked one per call).
    pyplot.savefig(file_name)
    pyplot.close(fig)
# Create Bar Graph for messenger month frequency.
messages(12, 0.3, 1, month, month_names, "Month of Message Sent",
"Number of Messages Sent", "Amount of Messages Sent on that Month",
"messages_month.png")
# Create Bar Graph for messenger dag frequency.
messages(7, 0.35, 0.5, day, day_names, "Day of Message Sent",
"Number of Messages Sent", "Amount of Messages Sent on Days of the Week",
"messages_days.png")
# Create Bar Graph for messenger time frequency.
messages(24, 0.20, 0.5, time, time_names, "Time of message sent",
"Number of messages sent", "Amount of Messages Sent on Time of the Day",
"messages_times.png")
|
mzhr/fb_freq
|
fb_freq.py
|
Python
|
mit
| 2,990
|
import settings
__author__ = 'Maruf Maniruzzaman'
import tornado
from tornado import gen
from cosmos.service.requesthandler import RequestHandler
class IndexHandler(RequestHandler):
    """Serve the static index page configured in ``settings.INDEX_HTML_PATH``."""

    @gen.coroutine
    def get(self):
        try:
            with open(settings.INDEX_HTML_PATH) as index_file:
                contents = index_file.read()
        except IOError:
            # Missing/unreadable index page maps to a plain 404.
            raise tornado.web.HTTPError(404, "File not found")
        self.write(contents)
|
mmrobbin/myproject
|
views.py
|
Python
|
mit
| 422
|
import requests, csv, time
from bs4 import BeautifulSoup
def download_group_odds():
    """Scrape oddschecker.com's "to qualify" odds for World Cup groups A-H.

    Returns (teams, sites): *teams* is a list of rows
    [team_name, group_letter, odd_or_None, ...] and *sites* names the
    bookmaker each odds column belongs to.
    NOTE: Python 2 source (print statement); needs requests + BeautifulSoup.
    """
    teams = []
    sites = []
    # Group pages run .../group-a/to-qualify through .../group-h/to-qualify.
    for group in (chr(ord('a') + x) for x in range(0,8)):
        urltemplate = "http://www.oddschecker.com/football/world-cup/group-{}/to-qualify"
        url = urltemplate.format(group)
        print "getting %s" % url
        # The cookie asks the site to serve decimal (not fractional) odds.
        r = requests.get(url, cookies={"odds_type":"decimal"})
        soup = BeautifulSoup(r.text)
        table = soup.find(attrs={"class":"eventTable"})
        # The header row carries the bookmaker names in title attributes.
        sitesrow = table.find_all("tr", {"class": "eventTableHeader"})
        sitelinks = sitesrow[0].find_all(lambda t: t.has_attr("title"))
        sites = [t["title"] for t in sitelinks]
        teamrows = table.find_all(attrs={"class": "eventTableRow"})
        for row in teamrows:
            cols = [t.text for t in row.find_all("td")]
            name = cols[1]
            # Skip the aggregate "any other team" row.
            if 'any other' in name.lower(): continue
            odds = []
            # Odds cells are <td> elements whose class is o, oi or oo.
            isanodd = lambda t: (t.name=="td" and t.has_attr("class") and
                    ('o' in t.attrs["class"] or
                     'oi' in t.attrs["class"] or
                     'oo' in t.attrs["class"]))
            rawodds = [t.text for t in row.find_all(isanodd)]
            for o in rawodds:
                # Empty or dash-containing cells mean "no odd offered".
                if not o or '-' in o: odds.append(None)
                else: odds.append(float(o))
            assert len(odds) == len(sites), "{} {}".format(odds, sites)
            teams.append([name, group] + odds)
    return teams, sites
if __name__=="__main__":
    teams, sites = download_group_odds()
    # Epoch-seconds suffix makes each output file unique.
    t = str(time.time()).split(".")[0]
    # NOTE: `file` is the Python 2 built-in alias for open().
    with file("raw/group_odds%s.csv" % t, 'w') as outfile:
        w = csv.writer(outfile)
        w.writerow(['name'] + sites)
        for row in teams:
            w.writerow(row)
|
llimllib/champsleagueviz
|
wcqualify/dl.py
|
Python
|
mit
| 1,842
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Return the Intangible draft-schematic template for these formal pants.

    Autogenerated factory; ``kernel`` is part of the generated interface
    and is unused here.
    """
    result = Intangible()
    result.template = "object/draft_schematic/clothing/shared_clothing_ith_pants_formal_11.iff"
    result.attribute_template_id = -1
    result.stfName("string_id_table","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
|
obi-two/Rebelion
|
data/scripts/templates/object/draft_schematic/clothing/shared_clothing_ith_pants_formal_11.py
|
Python
|
mit
| 466
|
from TimeFunctions import calculate_time_diff
|
hwroitzsch/BikersLifeSaver
|
src/nfz_module/__init__.py
|
Python
|
mit
| 45
|
from flair.data import TaggedCorpus, Sentence
from flair.data_fetcher import NLPTaskDataFetcher, NLPTask
from flair.embeddings import TokenEmbeddings, WordEmbeddings, StackedEmbeddings, FlairEmbeddings
from typing import List
from flair.models import SequenceTagger
from torch.optim.adam import Adam
# Column 0 of the TSV holds the token text, column 1 its POS tag.
columns = {0: 'text', 1: 'pos'}
data_folder = 'data/'
# Load the three tagged TSV files as one TaggedCorpus (train/test/dev).
corpus: TaggedCorpus = NLPTaskDataFetcher.load_column_corpus(data_folder, columns,
                                                             train_file='Miami_Universal_Tagged.tsv',
                                                             test_file='S7_Universal_Tagged.tsv',
                                                             dev_file='KC_ARIS_Universal_Tagged.tsv')
# NOTE(review): `temp` is never read afterwards — candidate for removal.
temp = corpus.train[0]
def split_sentence(tokens, seq_len = 30):
    """Chop a flat token list into Sentence objects of at most *seq_len* tokens."""
    chunks = [tokens[start: start + seq_len] for start in range(0, len(tokens), seq_len)]
    sentences = []
    for chunk in chunks:
        sentence = Sentence()
        sentence.tokens = chunk
        sentences.append(sentence)
    return sentences
# Each loaded split arrives as one giant "sentence"; re-chunk every split
# into 30-token sentences before training.
train = split_sentence(corpus.train[0])
print(corpus.test)
test = split_sentence(corpus.test[0])
dev = split_sentence(corpus.dev[0])
print(len(train))
print(len(test))
print(len(dev))
corpus = TaggedCorpus(train, dev, test)
# Build the POS tag vocabulary from the corpus.
tag_type = 'pos'
tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
print(tag_dictionary.idx2item)
# Stack English ("news") and Spanish contextual string embeddings.
embedding_types: List[TokenEmbeddings] = [
    FlairEmbeddings('news-forward'),
    FlairEmbeddings('news-backward'),
    FlairEmbeddings('spanish-forward'),
    FlairEmbeddings('spanish-backward')
]
embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)
# 3-layer BiLSTM with a CRF output layer over the stacked embeddings.
tagger: SequenceTagger = SequenceTagger(
    hidden_size=512,
    embeddings=embeddings,
    tag_dictionary=tag_dictionary,
    tag_type=tag_type,
    use_rnn=True,
    use_crf=True,
    rnn_layers = 3,
    dropout = 0.3
)
from flair.trainers import ModelTrainer
# Train with Adam; checkpoints and logs go under resources/taggers/pos.
trainer: ModelTrainer = ModelTrainer(tagger, corpus, optimizer = Adam)
trainer.train('resources/taggers/pos',
              learning_rate = 1e-4,
              mini_batch_size=32,
              max_epochs=50)
|
Bilingual-Annotation-Task-Force/Scripts
|
train_flair_POS.py
|
Python
|
mit
| 2,075
|
import re
def removeLinks(text):
    """Strip lines that begin with an http(s) URL from *text*.

    Regex from http://stackoverflow.com/questions/11331982/how-to-remove-any-url-within-a-string-in-python
    Only URLs at the start of a line are removed (MULTILINE anchor),
    together with the rest of that line and any trailing newlines.
    """
    url_line = re.compile(r'^https?:\/\/.*[\r\n]*', flags=re.MULTILINE)
    return url_line.sub('', text)
|
JoelHoskin/CatHack
|
FilterHelper.py
|
Python
|
mit
| 226
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_tatooine_fixer.iff"
result.attribute_template_id = 9
result.stfName("npc_name","fixer")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/mobile/shared_dressed_tatooine_fixer.py
|
Python
|
mit
| 437
|
#!/usr/bin/env python
import os, sys, datetime
sys.path.insert ( 0, os.path.dirname(os.path.abspath(__file__) ) + "/../" )
import gepard
import json
import threading
import time
import types
import time
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
# import ipaddress
# ==========================================================================
from glob import glob
# Build a demo event, exercise the status accessors, then round-trip it
# through serialize()/deserialize().
e = gepard.Event ( "__FILE__" )
# e.setUser ( gepard.User ( "gess" ) )
e.putValue ( "STRING", "TEXT" )
e.putValue ( "BINARY", bytearray ( [ 64, 65, 66, 67 ] ) )
e.putValue ( "DATE", datetime.datetime.now() )
e.putValue ( "STRING/IN/PATH", "AAA" )
e.putValue ( "STRING/IN/PATH2", "BBB" )
e.setStatus ( 0, "success", "File accepted.")
e.setIsResult()
print ( e.getStatus() )
print ( e.getStatusReason() )
print ( e.getStatusName() )
print ( e.getStatusCode() )
print ( e.isBad() )
print ( "e.isResult()=" + str ( e.isResult() ) )
# Serialize, rebuild, and dump the rebuilt copy's status/user.
t = e.serialize()
print ( t )
ee = gepard.Event.deserialize ( t )
print ( ee.getStatus() )
print ( ee.getUser() )
|
gessinger-hj/gepard-python
|
test/Event.test.py
|
Python
|
mit
| 1,056
|
import logging
from twisted.python import log
#see http://stackoverflow.com/questions/13748222/twisted-log-level-switch
class LevelFileLogObserver(log.FileLogObserver):
    """FileLogObserver that drops events below a configurable log level.

    See http://stackoverflow.com/questions/13748222/twisted-log-level-switch
    """

    def __init__(self, f, level=logging.INFO):
        log.FileLogObserver.__init__(self, f)
        self.logLevel = level

    def emit(self, eventDict):
        # Errors always rank ERROR; otherwise use the event's own level,
        # defaulting to INFO when none is attached.
        if eventDict['isError']:
            severity = logging.ERROR
        else:
            severity = eventDict.get('level', logging.INFO)
        if severity >= self.logLevel:
            log.FileLogObserver.emit(self, eventDict)
|
rauburtin/sftpproxydocker
|
sftpproxydocker/levfilelogger.py
|
Python
|
mit
| 603
|
"""
SoftLayer.tests.CLI.core_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import io
import logging
import click
from unittest import mock as mock
from requests.models import Response
import SoftLayer
from SoftLayer.CLI import core
from SoftLayer.CLI import environment
from SoftLayer import testing
class CoreTests(testing.TestCase):
    """End-to-end smoke tests for the top-level `slcli` command group."""

    def test_load_all(self):
        # Render --help for every registered subcommand; a non-zero exit
        # code means the command failed to even load.
        for path, cmd in recursive_subcommand_loader(core.cli, current_path='root'):
            try:
                cmd.main(args=['--help'])
            except SystemExit as ex:
                if ex.code != 0:
                    self.fail("Non-zero exit code for command: %s" % path)

    def test_verbose_max(self):
        # -vvv should switch the logger to DEBUG and attach a handler.
        with mock.patch('logging.getLogger') as log_mock:
            result = self.run_command(['-vvv', 'vs', 'list'])
            self.assert_no_fail(result)
            log_mock().addHandler.assert_called_with(mock.ANY)
            log_mock().setLevel.assert_called_with(logging.DEBUG)

    def test_build_client(self):
        # Running a command through a fresh Environment builds its client.
        env = environment.Environment()
        result = self.run_command(['vs', 'list'], env=env)
        self.assert_no_fail(result)
        self.assertIsNotNone(env.client)

    def test_diagnostics(self):
        # -v appends a diagnostics blob (API calls, timings, versions).
        result = self.run_command(['-v', 'vs', 'list'])
        self.assert_no_fail(result)
        self.assertIn('SoftLayer_Account::getVirtualGuests', result.output)
        self.assertIn('"execution_time"', result.output)
        self.assertIn('"api_calls"', result.output)
        self.assertIn('"version"', result.output)
        self.assertIn('"python_version"', result.output)
        self.assertIn('"library_location"', result.output)

    @mock.patch('requests.get')
    def test_get_latest_version(self, request_get):
        # Successful PyPI query: the version string is passed through.
        response = Response()
        response.status_code = 200
        response.json = mock.MagicMock(return_value={"info": {"version": "1.1.1"}})
        request_get.return_value = response
        version = core.get_latest_version()
        self.assertIn('1.1.1', version)

    @mock.patch('requests.get')
    def test_unable_get_latest_version(self, request_get):
        # Failed PyPI query: an "Unable ..." placeholder is returned.
        request_get.side_effect = Exception
        version = core.get_latest_version()
        self.assertIn('Unable', version)

    @mock.patch('SoftLayer.CLI.core.get_latest_version')
    def test_get_version_message(self, get_latest_version_mock):
        # --version must succeed with the latest-version lookup mocked out.
        get_latest_version_mock.return_value = '1.1.1'
        env = environment.Environment()
        result = self.run_command(['--version'], env=env)
        self.assert_no_fail(result)
class CoreMainTests(testing.TestCase):
    """Tests for core.main()'s error reporting on stdout."""

    @mock.patch('SoftLayer.CLI.core.cli.main')
    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_unexpected_error(self, stdoutmock, climock):
        # Unexpected exceptions print a bug-report hint plus the traceback.
        climock.side_effect = AttributeError('Attribute foo does not exist')
        self.assertRaises(SystemExit, core.main)
        self.assertIn("Feel free to report this error as it is likely a bug",
                      stdoutmock.getvalue())
        self.assertIn("Traceback (most recent call last)",
                      stdoutmock.getvalue())
        self.assertIn("AttributeError: Attribute foo does not exist",
                      stdoutmock.getvalue())

    @mock.patch('SoftLayer.CLI.core.cli.main')
    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_sl_error(self, stdoutmock, climock):
        # API errors are summarized on a single line.
        ex = SoftLayer.SoftLayerAPIError('SoftLayer_Exception', 'Not found')
        climock.side_effect = ex
        self.assertRaises(SystemExit, core.main)
        self.assertIn("SoftLayerAPIError(SoftLayer_Exception): Not found",
                      stdoutmock.getvalue())

    @mock.patch('SoftLayer.CLI.core.cli.main')
    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_auth_error(self, stdoutmock, climock):
        # Invalid-token errors get a friendlier authentication hint.
        ex = SoftLayer.SoftLayerAPIError('SoftLayer_Exception',
                                         'Invalid API token.')
        climock.side_effect = ex
        self.assertRaises(SystemExit, core.main)
        self.assertIn("Authentication Failed:", stdoutmock.getvalue())
        self.assertIn("use 'slcli config setup'", stdoutmock.getvalue())
def recursive_subcommand_loader(root, current_path=''):
    """Recursively load and list every command.

    Yields (path, command) pairs depth-first; *root* without a
    list_commands attribute produces nothing.
    """
    if getattr(root, 'list_commands', None) is None:
        return
    context = click.Context(root)
    for name in root.list_commands(context):
        child_path = '%s:%s' % (current_path, name)
        logging.info("loading %s", child_path)
        child = root.get_command(context, name)
        if child is None:
            raise Exception('Could not load command: %s' % name)
        yield from recursive_subcommand_loader(child, current_path=child_path)
        yield current_path, child
|
softlayer/softlayer-python
|
tests/CLI/core_tests.py
|
Python
|
mit
| 4,941
|
from pyhdf.HDF import *
from pyhdf.VS import *
f = HDF('inventory.hdf',        # Open file 'inventory.hdf' in write mode
        HC.WRITE|HC.CREATE)     # creating it if it does not exist
vs = f.vstart()                 # init vdata interface
vd = vs.attach('INVENTORY', 1)  # attach vdata 'INVENTORY' in write mode

# Update the `status' vdata attribute. The attribute length must not
# change: info()[2] holds the number of values (string length), so we
# write a left-justified string of exactly that length.
# (Renamed from `len` so the builtin len() is not shadowed.)
status_len = vd.attr('status').info()[2]
vd.status = '%-*s' % (status_len, 'phase 3 done')

# Update record at index 1 (second record)
vd[1] = ('Z4367', 'surprise', 10, 3.1, 44.5)
# Update record at index 4, and those after
vd[4:] = (
    ('QR231', 'toy', 12, 2.5, 45),
    ('R3389', 'robot', 3, 45, 2000),
    ('R3390', 'robot2', 8, 55, 2050)
    )
vd.detach()               # "close" the vdata
vs.end()                  # terminate the vdata interface
f.close()                 # close the HDF file
|
fhs/python-hdf4
|
examples/inventory/inventory_1-3.py
|
Python
|
mit
| 1,101
|
import aiohttp
from time import time
import json
from hashlib import sha256
import hmac
from .fetcher import Fetcher
class BinanceAPI(Fetcher):
    """Minimal async client for Binance's signed /account endpoint."""

    _URL = 'https://api.binance.com/api/v3/'  # REST base URL
    _KEY = None      # API key, sent in the X-MBX-APIKEY header
    _SECRET = None   # secret used to HMAC-sign request query strings

    def __init__(self, key, secret):
        # Both credentials are mandatory for signed endpoints.
        if key is None or secret is None:
            raise EnvironmentError("Binance key and secret must be specified in configs")
        self._KEY = key
        self._SECRET = secret

    def _signature(self, query):
        """Return the upper-cased hex HMAC-SHA256 signature of *query*."""
        message = query
        return hmac.new(
            key=self._SECRET.encode(),
            msg=message.encode(),
            digestmod=sha256
        ).hexdigest().upper()

    async def get_balances(self, loop, symbols, callback=None):
        """Fetch (asset, locked + free) balances for the assets in *symbols*.

        If *callback* is given, it is invoked with the result list before
        it is returned.
        """
        async with aiohttp.ClientSession(loop=loop) as session:
            # Signed requests need a millisecond timestamp plus a receive
            # window (30 s here) and the HMAC signature of the query string.
            nonce = int(time() * 1000)
            query = 'timestamp={}&recvWindow={}'.format(nonce, 30000)
            endpoint = self._URL + 'account?' + query
            headers = {
                'X-MBX-APIKEY': self._KEY
            }
            signature = self._signature(query)
            endpoint += '&signature={}'.format(signature)
            # _fetch comes from the Fetcher base; its result is parsed as JSON.
            _response = await self._fetch(session=session, url=endpoint, headers=headers)
            balances = json.loads(_response).get('balances', [])
            result = []
            for balance in balances:
                if balance['asset'] in symbols:
                    result.append(
                        (balance['asset'],
                         float(balance.get('locked', 0)) + float(balance.get('free', 0)))
                    )
            if callback is not None:
                callback(result)
            return result
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Flask-NewProject documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 5 14:38:01 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Extensions in use: autodoc (API docs from docstrings) and todo directives.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Flask-NewProject'
copyright = '2017, Raymond Williams'
author = 'Raymond Williams'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.1'
# The full version, including alpha/beta/rc tags.
# NOTE: keep `version` and `release` in sync when bumping.
release = '0.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-NewProjectdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Flask-NewProject.tex', 'Flask-NewProject Documentation',
     'Raymond Williams', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'flask-newproject', 'Flask-NewProject Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Flask-NewProject', 'Flask-NewProject Documentation',
     author, 'Flask-NewProject', 'One line description of project.',
     'Miscellaneous'),
]
|
Gunak/flask-blueprintTemplate
|
docs/source/conf.py
|
Python
|
mit
| 4,830
|
# -*- coding: utf-8 -*-
import os
try:
import zlib as binascii
except ImportError:
import binascii
from base64 import urlsafe_b64encode
import auth.up
import conf
# Upload tuning defaults (see setup() for the chunk/retry overrides).
_workers = 1
_task_queue_size = _workers * 4
_chunk_size = 256 * 1024        # bytes sent per chunk request
_try_times = 3                  # retries per block before giving up
_block_size = 4 * 1024 * 1024   # qiniu's fixed 4 MB block size
class Error(Exception):
    """Upload error carrying a human-readable message in ``value``."""
    value = None  # the message; also what str() returns
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return self.value
# Module-level error singletons returned by the upload routines.
err_invalid_put_progress = Error("invalid put progress")
err_put_failed = Error("resumable put failed")
err_unmatched_checksum = Error("unmatched checksum")
def setup(chunk_size=0, try_times=0):
    """Set module-wide upload defaults.

    * chunk_size -- default chunk size in bytes; 0 selects 256 KB
    * try_times  -- default retry count per block; 0 selects 3
    """
    global _chunk_size, _try_times
    _chunk_size = 1 << 18 if chunk_size == 0 else chunk_size
    _try_times = 3 if try_times == 0 else try_times
# ----------------------------------------------------------
def gen_crc32(data):
    """Return the CRC32 of *data* as an unsigned 32-bit integer."""
    checksum = binascii.crc32(data)
    return checksum & 0xffffffff
class PutExtra(object):
    """Optional parameters and progress state for one resumable upload."""
    params = None      # custom user vars; keys must start with "x:"
    mimetype = None    # optional MIME type when the uptoken did not request DetectMime
    chunk_size = None  # optional per-upload chunk size override
    try_times = None   # optional per-upload retry count override
    progresses = None  # optional per-block upload progress (for resuming)
    notify = lambda self, idx, size, ret: None      # optional progress callback
    notify_err = lambda self, idx, size, err: None  # optional error callback
    def __init__(self, bucket):
        self.bucket = bucket
def put_file(uptoken, key, localfile, extra):
    """Upload a local file via the resumable protocol.

    Opens *localfile*, delegates to put() with its size, and always closes
    the handle (the original leaked it when put() raised).
    """
    statinfo = os.stat(localfile)
    f = open(localfile, "rb")
    try:
        return put(uptoken, key, f, statinfo.st_size, extra)
    finally:
        f.close()
def put(uptoken, key, f, fsize, extra):
    """Upload a binary stream by slicing it into 4 MB blocks.

    Returns the (result, error) pair from mkfile, or None when *extra*
    is not a PutExtra.  NOTE: Python 2 source (xrange, print statement).
    """
    if not isinstance(extra, PutExtra):
        print("extra must the instance of PutExtra")
        return
    block_cnt = block_count(fsize)
    # Fresh upload: one progress slot per block; a resumed upload must
    # supply exactly block_cnt slots.
    if extra.progresses is None:
        extra.progresses = [None for i in xrange(0, block_cnt)]
    else:
        if not len(extra.progresses) == block_cnt:
            return None, err_invalid_put_progress
    if extra.try_times is None:
        extra.try_times = _try_times
    if extra.chunk_size is None:
        extra.chunk_size = _chunk_size
    client = auth.up.Client(uptoken)
    for i in xrange(0, block_cnt):
        try_time = extra.try_times
        # The last block may be shorter than a full 4 MB.
        read_length = _block_size
        if (i+1)*_block_size > fsize:
            read_length = fsize - i*_block_size
        data_slice = f.read(read_length)
        # Retry each block up to try_times before failing the upload.
        while True:
            err = resumable_block_put(client, data_slice, i, extra)
            if err is None:
                break
            try_time -= 1
            if try_time <= 0:
                return None, err_put_failed
            print err, ".. retry"
    return mkfile(client, key, fsize, extra)
# ----------------------------------------------------------
def resumable_block_put(client, block, index, extra):
    """Upload one block chunk by chunk, recording progress in *extra*.

    Returns None on success or an Error value on failure.
    """
    block_size = len(block)
    # First chunk of a new block: create it server-side via mkblk.
    if extra.progresses[index] is None or "ctx" not in extra.progresses[index]:
        end_pos = extra.chunk_size-1
        if block_size < extra.chunk_size:
            end_pos = block_size-1
        # NOTE(review): block[:end_pos] excludes index end_pos, i.e. one
        # byte fewer than chunk_size — suspected off-by-one; confirm
        # against the server's expected offsets.
        chunk = block[: end_pos]
        crc32 = gen_crc32(chunk)
        chunk = bytearray(chunk)
        extra.progresses[index], err = mkblock(client, block_size, chunk)
        # NOTE(review): the checksum is compared before err is checked, so
        # a failed call may blow up here if the progress dict is malformed.
        if not extra.progresses[index]["crc32"] == crc32:
            return err_unmatched_checksum
        if err is not None:
            extra.notify_err(index, end_pos + 1, err)
            return err
        extra.notify(index, end_pos + 1, extra.progresses[index])
    # Remaining chunks: append with bput until the whole block is sent.
    while extra.progresses[index]["offset"] < block_size:
        offset = extra.progresses[index]["offset"]
        # NOTE(review): same suspected off-by-one as above.
        chunk = block[offset: offset+extra.chunk_size-1]
        crc32 = gen_crc32(chunk)
        chunk = bytearray(chunk)
        extra.progresses[index], err = putblock(client, extra.progresses[index], chunk)
        if not extra.progresses[index]["crc32"] == crc32:
            return err_unmatched_checksum
        if err is not None:
            extra.notify_err(index, len(chunk), err)
            return err
        extra.notify(index, len(chunk), extra.progresses[index])
def block_count(size):
    """Return the number of 4 MB blocks needed to hold *size* bytes.

    Uses ceiling division: the original ``size / _block_size + 1``
    over-counted by one whole (empty) block whenever *size* was an exact
    multiple of _block_size, and produced a float under Python 3.
    """
    global _block_size
    return (size + _block_size - 1) // _block_size
def mkblock(client, block_size, first_chunk):
    """POST the first chunk to /mkblk, creating a new block server-side."""
    url = "http://%s/mkblk/%s" % (conf.UP_HOST, block_size)
    content_type = "application/octet-stream"
    return client.call_with(url, first_chunk, content_type, len(first_chunk))
def putblock(client, block_ret, chunk):
    """Append *chunk* to an existing block at the server-reported offset."""
    url = "%s/bput/%s/%s" % (block_ret["host"], block_ret["ctx"], block_ret["offset"])
    content_type = "application/octet-stream"
    return client.call_with(url, chunk, content_type, len(chunk))
def mkfile(client, key, fsize, extra):
    """Finalize the upload by joining all block ctx tokens via /mkfile.

    Optional mimetype, key and custom params are appended to the URL,
    base64url-encoded.  NOTE: Python 2 source (dict.iteritems).
    """
    url = ["http://%s/mkfile/%s" % (conf.UP_HOST, fsize)]
    if extra.mimetype:
        url.append("mimeType/%s" % urlsafe_b64encode(extra.mimetype))
    if key is not None:
        url.append("key/%s" % urlsafe_b64encode(key))
    if extra.params:
        for k, v in extra.params.iteritems():
            url.append("%s/%s" % (k, urlsafe_b64encode(v)))
    url = "/".join(url)
    # The body lists every uploaded block's ctx token, comma-separated.
    body = ",".join([i["ctx"] for i in extra.progresses])
    return client.call_with(url, body, "text/plain", len(body))
|
yobin/saepy-log
|
qiniu/resumable_io.py
|
Python
|
mit
| 5,011
|
#!/usr/bin/env python
from agate.columns.base import Column
class BooleanColumn(Column):
    """
    A column containing :class:`bool` data.

    Behaviour is inherited unchanged from :class:`Column`; the subclass
    exists only to tag the column's data type.
    """
    pass
|
TylerFisher/agate
|
agate/columns/boolean.py
|
Python
|
mit
| 160
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Return the Tangible template for the hair-dye chemistry component.

    Autogenerated factory; ``kernel`` is part of the generated interface
    and is unused here.
    """
    result = Tangible()
    result.template = "object/tangible/component/chemistry/shared_dye_hair.iff"
    result.attribute_template_id = -1
    result.stfName("","")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return result
|
anhstudios/swganh
|
data/scripts/templates/object/tangible/component/chemistry/shared_dye_hair.py
|
Python
|
mit
| 433
|
from model import *
# ------------------------------------------------------------------ helpers and mgmt
def get_feed_dic_obs(obs):
    """Build the feed dict for the observation placeholders.

    Unused observation slots stay zero; each supplied (coordinate, label)
    pair is vectorized and tiled across the batch dimension.
    """
    obs_x = []
    obs_y = []
    obs_tf = []
    for _ in range(OBS_SIZE):
        obs_x.append(np.zeros([N_BATCH,L]))
        obs_y.append(np.zeros([N_BATCH,L]))
        obs_tf.append(np.zeros([N_BATCH,2]))
    num_obs = len(obs)
    for ob_idx in range(num_obs):
        ob_coord, ob_lab = obs[ob_idx]
        ob_x, ob_y = vectorize(ob_coord)
        # Tile across the batch; the original hard-coded 50 here, which
        # silently breaks whenever N_BATCH != 50.
        obs_x[ob_idx] = np.tile(ob_x, [N_BATCH,1])
        obs_y[ob_idx] = np.tile(ob_y, [N_BATCH,1])
        obs_tf[ob_idx] = np.tile(ob_lab, [N_BATCH,1])
    feed_dic = dict(zip(ph_obs_x + ph_obs_y + ph_obs_tf,
                        obs_x + obs_y + obs_tf))
    return feed_dic
def get_inv(sess, obs):
    """Return the (x, y) inversion after conditioning on all of *obs*."""
    num_obs = len(obs)
    return get_inv_tr(sess, obs)[num_obs]
def get_inv_tr(sess, obs):
    """Return the trace of (x, y) inversions over the observation steps.

    Predictions are argmaxes over the first batch element only.
    (Removed the original's unused ``num_obs`` local.)
    """
    feed_dic = get_feed_dic_obs(obs)
    x_invss = [np.argmax(x[0]) for x in sess.run(x_invs, feed_dict = feed_dic)]
    y_invss = [np.argmax(x[0]) for x in sess.run(y_invs, feed_dict = feed_dic)]
    return zip(x_invss, y_invss)
def get_most_confuse(sess, obs):
    """Return the (x, y) query where the model's true/false prediction is
    closest to 50/50, i.e. where it is least certain."""
    feed_dic = get_feed_dic_obs(obs)
    # Prediction slot immediately after the supplied observations.
    key_ob = len(obs)
    all_querys = []
    for i in range(L):
        for j in range(L):
            all_querys.append((i,j))
    # Track (|p_true - p_false|, query); a smaller margin = more confused.
    most_conf = (1.0, None)
    for q in all_querys:
        q_x, q_y = vectorize(q)
        feed_dic[ph_new_ob_x] = np.tile(q_x, [N_BATCH,1])
        feed_dic[ph_new_ob_y] = np.tile(q_y, [N_BATCH,1])
        pred_tf = sess.run(query_preds, feed_dict=feed_dic)[key_ob][0]
        most_conf = min(most_conf, (abs(pred_tf[0] - pred_tf[1]), q))
    return most_conf[1]
def get_random_inv(sess, query):
    """Observe OBS_SIZE uniformly random points, then return the trace of
    inversions paired with the observation that preceded each."""
    ob_pts = [(np.random.randint(0, L), np.random.randint(0,L)) for _ in range(OBS_SIZE)]
    obs = [(op, query(op)) for op in ob_pts]
    return zip([None] + obs, get_inv_tr(sess, obs))
def get_active_inv(sess, query):
    """Actively pick OBS_SIZE observations (most-confusing query first)
    and return the trace of inversions.  NOTE: Python 2 print statement."""
    obs = []
    for i in range(OBS_SIZE):
        # NOTE(review): try_inv is computed but never used — presumably
        # kept for its side-effect-free warm-up; confirm before removing.
        try_inv = get_inv(sess, obs)
        most_conf = get_most_confuse(sess, obs)
        print "chosen observation ", most_conf
        obs.append((most_conf, query(most_conf)))
    return zip([None] + obs, get_inv_tr(sess, obs))
|
evanthebouncy/nnhmm
|
radar_lstm/active_learning.py
|
Python
|
mit
| 2,142
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-08-11 11:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """No-op migration: records a dependency on 0005 with no schema changes."""
    dependencies = [
        ('transactions', '0005_auto_20170811_1048'),
    ]
    operations = [
    ]
|
sebastienbarbier/723e_server
|
seven23/models/transactions/migrations/0006_auto_20170811_1131.py
|
Python
|
mit
| 294
|
import sys
from restorm.examples.mock.api import TicketApiClient
def main(argv):
    """Run the mock ticket webservice.

    Start with::

        python -m restorm.examples.mock.ticket_serv [port or address:port]
    """
    ip_address = '127.0.0.1'
    port = 8000
    # This is an example; real code should validate its arguments.
    if len(argv) == 1:
        ip_address_port = argv[0].split(':', 1)
        if len(ip_address_port) == 1:
            port = ip_address_port[0]
        else:
            ip_address, port = ip_address_port
    # Create a playground HTTP server that handles requests from the
    # ``TicketApiClient``.
    api = TicketApiClient('http://%s:%s/api/' % (ip_address, port))
    server = api.create_server(ip_address, int(port))
    print 'Mock ticket webservice is running at http://%s:%s/api/' % (ip_address, port)
    print 'Quit the server with CTRL-C.'
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print 'Closing server...'
        server.socket.close()

if __name__ == '__main__':
    main(sys.argv[1:])
|
joeribekker/restorm
|
restorm/examples/mock/ticket_serv.py
|
Python
|
mit
| 1,056
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################
# name:updateHosts
# author:https://github.com/ladder1984
# version:1.3.3
# license:MIT
############################
import urllib2
import platform
import datetime
import time
import re
import os
import shutil
import ConfigParser
import sys
import socket
# default settings (overridden by config.ini in get_config())
hosts_folder = ""
hosts_location = hosts_folder + "hosts"
source_list = ['https://raw.githubusercontent.com/vokins/simpleu/master/hosts']
not_block_sites = 0
always_on = 0
# default settings
# NOTE(review): opened at import time; only closed by exit_this()/main().
errorLog = open('errorLog.txt', 'a')
def get_cur_info():
    """Return the name of the *calling* function (used in error log entries)."""
    caller_frame = sys._getframe().f_back
    return caller_frame.f_code.co_name
def exit_this():
    """Close the error log and terminate the script."""
    errorLog.close()
    sys.exit()
def check_connection():
    """Wait for the network (DNS) to come up, polling once per second.

    Tries to resolve a well-known host up to 1200 times; if every attempt
    fails, closes the log and exits.  (The original also exited when the
    *last* attempt succeeded, because it only inspected the loop index.)
    """
    sleep_seconds = 1200
    for _ in range(sleep_seconds):
        try:
            socket.gethostbyname("www.baidu.com")
            return  # network is reachable
        except socket.gaierror:
            time.sleep(1)
    exit_this()  # never resolved: give up
def check_system():
    """Point hosts_folder/hosts_location at this OS's hosts directory.

    Exits the script on unsupported platforms.
    """
    global hosts_folder
    global hosts_location
    system = platform.system()
    if system == 'Windows':
        hosts_folder = os.environ['SYSTEMROOT'] + "\\System32\\drivers\\etc\\"
    elif system in ('Linux', 'Darwin'):
        hosts_folder = "/etc/"
    else:
        exit_this()
    hosts_location = hosts_folder + "hosts"
def get_config():
    """Load source URLs and feature flags from config.ini, if present."""
    global source_list
    global not_block_sites
    global always_on
    if os.path.exists('config.ini'):
        try:
            # Strip any BOM that Windows Notepad prepends before parsing.
            content = open('config.ini').read()
            content = re.sub(r"\xfe\xff", "", content)
            content = re.sub(r"\xff\xfe", "", content)
            content = re.sub(r"\xef\xbb\xbf", "", content)
            open('config.ini', 'w').write(content)
            config = ConfigParser.ConfigParser()
            config.read('config.ini')
            # source_id is a comma list selecting source1..sourceN entries.
            source_id = config.get('source_select', 'source_id')
            source_list = source_id.split(",")
            for i in range(len(source_list)):
                source_list[i]=config.get('source_select', 'source'+str(i+1))
            not_block_sites = config.get("function", "not_block_sites")
            always_on = config.get("function","always_on")
        except BaseException, e:
            errorLog.write(str(datetime.datetime.now())+'\n'+'function:'+get_cur_info()+'\nerror:'+str(e)+'\n\n')
            exit_this()
def backup_hosts():
    """Keep two backups beside the live hosts file.

    backup_hosts_original_by_updateHosts is written only once (first run);
    backup_hosts_last_by_updateHosts is refreshed on every run.
    """
    try:
        if (not os.path.isfile(hosts_folder + 'backup_hosts_original_by_updateHosts')) and \
                os.path.isfile(hosts_folder + 'hosts'):
            shutil.copy(hosts_folder+'hosts', hosts_folder+'backup_hosts_original_by_updateHosts')
        if os.path.isfile(hosts_folder + 'hosts'):
            shutil.copy(hosts_folder+'hosts', hosts_folder+'backup_hosts_last_by_updateHosts')
    except BaseException, e:
        errorLog.write(str(datetime.datetime.now())+'\n'+'function:'+get_cur_info()+'\nerror:'+str(e)+'\n\n')
        exit_this()
def download_hosts():
try:
hosts_from_web = open("hosts_from_web","a")
for x in source_list:
data=urllib2.urlopen(x)
hosts_from_web.write(data.read())
except BaseException, e:
errorLog.write(str(datetime.datetime.now())+'\n'+'function:'+get_cur_info()+'\nerror:'+str(e)+'\n\n')
exit_this()
def process_hosts():
try:
hosts_content = open('hosts', 'w')
file_from_web = open('hosts_from_web')
hosts_from_web = file_from_web.read()
file_user_defined = open('hosts_user_defined.txt')
hosts_user_defined = file_user_defined.read()
hosts_content.write('#hosts_user_defined\n')
hosts_content.write(hosts_user_defined)
hosts_content.write('\n#hosts_user_defined\n')
hosts_content.write('\n\n#hosts_by_hostsUpdate\n\n')
if not_block_sites is "1":
hosts_from_web = re.sub("127.0.0.1", "#not_block_sites", hosts_from_web)
hosts_content.write(hosts_from_web)
hosts_content.write('\n#hosts_by_hostsUpdate')
hosts_content.close()
file_from_web.close()
file_user_defined.close()
os.remove('hosts_from_web')
except BaseException, e:
errorLog.write(str(datetime.datetime.now())+'\n'+'function:'+get_cur_info()+'\nerror:'+str(e)+'\n\n')
exit_this()
def move_hosts():
    """Install the freshly generated hosts file into the system location."""
    try:
        shutil.move("hosts", hosts_location)
    except BaseException, e:
        errorLog.write(str(datetime.datetime.now())+'\n'+'function:'+get_cur_info()+'\nerror:'+str(e)+'\n\n')
        exit_this()
def main():
    """Run one full update cycle: wait for the network, detect the OS, read
    the configuration, back up, download, rebuild and install hosts."""
    check_connection()
    check_system()
    get_config()
    backup_hosts()
    download_hosts()
    process_hosts()
    move_hosts()
    # NOTE(review): errorLog is closed here, but with always_on enabled
    # main() is called again and its error paths write to this closed file --
    # confirm whether the log should be reopened per cycle.
    errorLog.close()
if __name__ == '__main__':
    main()
    # With always_on set to "1" in config.ini, keep refreshing the hosts
    # file once per hour, forever.
    if always_on == "1":
        while 1:
            time.sleep(3600)
            main()
|
wuantony0701/updateHosts
|
updateHosts.py
|
Python
|
mit
| 4,913
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class SecurityRule(SubResource):
    """Network security rule.

    :param id: Resource ID.
    :type id: str
    :param description: A description for this rule. Restricted to 140 chars.
    :type description: str
    :param protocol: Network protocol this rule applies to. Possible values
     are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp', '*'
    :type protocol: str or :class:`SecurityRuleProtocol
     <azure.mgmt.network.v2017_03_01.models.SecurityRuleProtocol>`
    :param source_port_range: The source port or range. Integer or range
     between 0 and 65535. Asterisk '*' can also be used to match all ports.
    :type source_port_range: str
    :param destination_port_range: The destination port or range. Integer or
     range between 0 and 65535. Asterisk '*' can also be used to match all
     ports.
    :type destination_port_range: str
    :param source_address_prefix: The CIDR or source IP range. Asterisk '*'
     can also be used to match all source IPs. Default tags such as
     'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
     this is an ingress rule, specifies where network traffic originates from.
    :type source_address_prefix: str
    :param destination_address_prefix: The destination address prefix. CIDR or
     source IP range. Asterisk '*' can also be used to match all source IPs.
     Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet'
     can also be used.
    :type destination_address_prefix: str
    :param access: The network traffic is allowed or denied. Possible values
     are: 'Allow' and 'Deny'. Possible values include: 'Allow', 'Deny'
    :type access: str or :class:`SecurityRuleAccess
     <azure.mgmt.network.v2017_03_01.models.SecurityRuleAccess>`
    :param priority: The priority of the rule. The value can be between 100
     and 4096. The priority number must be unique for each rule in the
     collection. The lower the priority number, the higher the priority of the
     rule.
    :type priority: int
    :param direction: The direction of the rule. The direction specifies if
     rule will be evaluated on incoming or outgoing traffic. Possible values
     are: 'Inbound' and 'Outbound'. Possible values include: 'Inbound',
     'Outbound'
    :type direction: str or :class:`SecurityRuleDirection
     <azure.mgmt.network.v2017_03_01.models.SecurityRuleDirection>`
    :param provisioning_state: The provisioning state of the public IP
     resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
    :type provisioning_state: str
    :param name: The name of the resource that is unique within a resource
     group. This name can be used to access the resource.
    :type name: str
    :param etag: A unique read-only string that changes whenever the resource
     is updated.
    :type etag: str
    """

    # Fields the service requires when the rule is created or updated.
    _validation = {
        'protocol': {'required': True},
        'source_address_prefix': {'required': True},
        'destination_address_prefix': {'required': True},
        'access': {'required': True},
        'direction': {'required': True},
    }

    # Maps Python attribute names to their (flattened) REST payload keys.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'protocol': {'key': 'properties.protocol', 'type': 'str'},
        'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
        'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
        'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
        'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
        'access': {'key': 'properties.access', 'type': 'str'},
        'priority': {'key': 'properties.priority', 'type': 'int'},
        'direction': {'key': 'properties.direction', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, protocol, source_address_prefix, destination_address_prefix, access, direction, id=None, description=None, source_port_range=None, destination_port_range=None, priority=None, provisioning_state=None, name=None, etag=None):
        super(SecurityRule, self).__init__(id=id)
        self.description = description
        self.protocol = protocol
        self.source_port_range = source_port_range
        self.destination_port_range = destination_port_range
        self.source_address_prefix = source_address_prefix
        self.destination_address_prefix = destination_address_prefix
        self.access = access
        self.priority = priority
        self.direction = direction
        self.provisioning_state = provisioning_state
        self.name = name
        self.etag = etag
|
SUSE/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/security_rule.py
|
Python
|
mit
| 5,400
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-15 19:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the temporary ``_new`` suffix left over from the JSONB switch
    (steps 1 and 2), renaming every migrated column back to its real name."""

    dependencies = [
        ('stats', '0011_jsonb_step_2'),
    ]

    # (model, field) pairs whose "<field>_new" column is renamed to "<field>".
    # Order matters: it matches the original hand-written operation list.
    _RENAMES = [
        ('logentry', 'extra_data'),
        ('mission', 'score_dict'),
        ('player', 'ammo'),
        ('player', 'killboard_pve'),
        ('player', 'killboard_pvp'),
        ('player', 'sorties_cls'),
        ('playeraircraft', 'ammo'),
        ('playeraircraft', 'killboard_pve'),
        ('playeraircraft', 'killboard_pvp'),
        ('playermission', 'ammo'),
        ('playermission', 'killboard_pve'),
        ('playermission', 'killboard_pvp'),
        ('sortie', 'ammo'),
        ('sortie', 'bonus'),
        ('sortie', 'debug'),
        ('sortie', 'killboard_pve'),
        ('sortie', 'killboard_pvp'),
        ('sortie', 'score_dict'),
    ]

    operations = [
        migrations.RenameField(
            model_name=model,
            old_name=field + '_new',
            new_name=field,
        )
        for (model, field) in _RENAMES
    ]
|
Flyingfox646/flyingfox
|
src/stats/migrations/0012_jsonb_step_3.py
|
Python
|
mit
| 3,011
|
""" This module contains a class for quickly creating bots. It is the highest
level of abstraction of the IRC protocol available in ``ircutils``.
"""
from . import client
from . import events
class SimpleBot(client.SimpleClient):
    """An IRC bot base class to subclass.

    Define methods named ``on_<event-name>`` on the subclass and each one is
    automatically added as a handler for the matching event listener.

    Inherits from :class:`ircutils.client.SimpleClient`; see that class'
    documentation for the rest of the available API.
    """

    # Naming convention linking handler methods to event listeners.
    _HANDLER_PREFIX = "on_"

    def __init__(self, nick):
        client.SimpleClient.__init__(self, nick)
        self._autobind_handlers()

    def _autobind_handlers(self):
        """Bind every ``on_<event>`` method to its event listener."""
        for event_name in self.events:
            handler_name = self._HANDLER_PREFIX + event_name
            if hasattr(self, handler_name):
                # Register the plain function so the listener supplies args.
                handler = getattr(self, handler_name).__func__
                self.events[event_name].add_handler(handler)

    def register_listener(self, event_name, listener):
        """Register *listener* like SimpleClient does, then auto-bind a
        matching ``on_<event>`` handler if this bot already defines one."""
        self.events.register_listener(event_name, listener)
        handler_name = self._HANDLER_PREFIX + event_name
        if hasattr(self, handler_name):
            handler = getattr(self, handler_name).__func__
            self.events[event_name].add_handler(handler)
class _TestBot(SimpleBot):
    """A debugging bot that prints every event it receives.

    Designed to be subclassed when building test bots; set ``verbose`` to
    False to silence the per-event output.
    """

    def __init__(self, nick):
        SimpleBot.__init__(self, nick)
        self["any"].add_handler(self.print_line)
        self.verbose = True

    def print_line(self, client, event):
        """Print one formatted line describing *event* (when verbose)."""
        if self.verbose:
            print("[{cmd}] s={src!r} t={tgt!r} p={params}".format(
                cmd=event.command,
                src=event.source,
                tgt=event.target,
                params=event.params,
            ))
|
Alakala/eggpy
|
ircutils/bot.py
|
Python
|
mit
| 2,203
|
""" Add inventory in and inventory out link to calculate profit using FIFO method
Revision ID: a173601e2e8c
Revises: 5fa54f2ce13c
Create Date: 2017-03-29 07:06:57.758959
"""
# revision identifiers, used by Alembic.
revision = 'a173601e2e8c'       # this migration
down_revision = '5fa54f2ce13c'  # parent migration in the chain
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create inventory_in_out_link (pairs each shipped quantity with the
    receiving line it came from, for FIFO profit calculation) and add a
    saleable_quantity column to inventory_transaction_line."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('inventory_in_out_link',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date', sa.DateTime(), nullable=False),
    sa.Column('product_id', sa.Integer(), nullable=False),
    sa.Column('in_price', sa.Numeric(precision=8, scale=2, decimal_return_scale=2), nullable=False),
    sa.Column('in_date', sa.DateTime(), nullable=False),
    sa.Column('receiving_line_id', sa.Integer(), nullable=False),
    sa.Column('out_price', sa.Numeric(precision=8, scale=2, decimal_return_scale=2), nullable=False),
    sa.Column('out_date', sa.DateTime(), nullable=False),
    sa.Column('out_quantity', sa.Numeric(precision=8, scale=2, decimal_return_scale=2), nullable=False),
    sa.Column('shipping_line_id', sa.Integer(), nullable=False),
    sa.Column('organization_id', sa.Integer(), nullable=True),
    sa.Column('remark', sa.Text(), nullable=True),
    sa.ForeignKeyConstraint(['organization_id'], ['organization.id'], ),
    sa.ForeignKeyConstraint(['product_id'], ['product.id'], ),
    sa.ForeignKeyConstraint(['receiving_line_id'], ['receiving_line.id'], ),
    sa.ForeignKeyConstraint(['shipping_line_id'], ['shipping_line.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.add_column(u'inventory_transaction_line', sa.Column('saleable_quantity', sa.Numeric(precision=8, scale=2, decimal_return_scale=2), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): drop the added column and the FIFO link table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column(u'inventory_transaction_line', 'saleable_quantity')
    op.drop_table('inventory_in_out_link')
    # ### end Alembic commands ###
|
betterlife/psi
|
psi/migrations/versions/35_a173601e2e8c_.py
|
Python
|
mit
| 2,034
|
# Skip if long ints are not supported.
import skip_if
skip_if.no_bigint()

# Expected-output test: what is printed is presumably compared against a
# stored expected-output file, so the exact prints (and order) matter.

# to_bytes() on values that do not fit in a 64-bit machine word.
print((2**64).to_bytes(9, "little"))
print((-2**64).to_bytes(9, "little", signed=True))

# from_bytes() of little-endian byte strings.
print(int.from_bytes(b"\x00\x01\0\0\0\0\0\0", "little"))
print(int.from_bytes(b"\x01\0\0\0\0\0\0\0", "little"))
# NOTE(review): duplicate of the first from_bytes() call above -- confirm the
# repetition is intended.
print(int.from_bytes(b"\x00\x01\0\0\0\0\0\0", "little"))
|
adafruit/micropython
|
tests/basics/int_longint_bytes.py
|
Python
|
mit
| 332
|
# -*- coding: utf-8 -*-
from django.db import models, migrations
import django.core.validators
import open_humans.models
class Migration(migrations.Migration):
    """Attach the project-defined user manager and re-sync several User field
    definitions with the framework's current defaults (auto-generated)."""

    dependencies = [
        ('open_humans', '0014_rename_openhumansuser'),
    ]

    operations = [
        # Use the project's manager for the custom user model.
        migrations.AlterModelManagers(
            name='user',
            managers=[
                ('objects', open_humans.models.OpenHumansUserManager()),
            ],
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='groups',
            field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_login',
            field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username'),
        ),
    ]
|
PersonalGenomesOrg/open-humans
|
open_humans/migrations/0015_auto_20150410_0042.py
|
Python
|
mit
| 1,726
|
#!/usr/bin/env python
import sys
import os
import piny
# Build PINY input directories for a 512-water q-TIP4P/F PIMD run:
# an equilibration stage followed by a production stage restarting from it.

# settings
P = 4  # number of path-integral beads (also used as the processor count)
box = 3 * [24.832]  # cubic box edge lengths
dir_out_eq = 'equilibration'
dir_out_prod = 'production'
fn_initial_xyz = 'W512-initial.xyz'  # starting configuration, 512 waters
fn_FF = 'water-q-TIP4P-F.py'  # force-field definition, exec'd below
min_dist = 0.1
max_dist = 10.0
res_dist = 5.0

# times in fs
t_tot_eq = 10000
t_tot_prod = 100000
t_write = 40
t_screen = 40
dt = 2.0

# Convert the times above into step counts / output frequencies.
write_freq_screen = int(t_screen / dt)
write_freq = int(t_write / dt)
n_step_eq = int(t_tot_eq / dt)
n_step_prod = int(t_tot_prod / dt)

# Read the initial configuration; each water contributes exactly one 'O'.
comment, names, positions = piny.tools.read_XYZ_frame(open(fn_initial_xyz))
nwater = names.count('O')
initial = piny.tools.initial_file(positions, P, box)

# PINY input file expressed as nested dicts: section name -> {key: value}.
input_PINY = {
    'sim_gen_def': {
        'simulation_typ': 'pimd',
        'ensemble_typ': 'npt_i',
        'num_time_step': n_step_eq,
        'restart_type': 'initial',
        'time_step': dt,
        'temperature': 300,
        'pressure': 1,
        'generic_fft_opt': 'on',
        'num_proc_tot': P,
        'num_proc_beads': P,
        'num_proc_class_forc': 1},
    'sim_pimd_def': {
        'path_int_beads': P,
        'path_int_md_typ': 'centroid',
        'respa_steps_pimd': 5,
        'initial_spread_opt': 'on',
        'initial_spread_size': 0.1
    },
    'sim_list_def': {
        'neighbor_list': 'ver_list',
        'verlist_skin': 1.5,
        'update_type': 'no_list',
    },
    'sim_run_def': {
        'respa_steps_intra': 2,
        'respa_steps_torsion': 1,
        'respa_steps_lrf': 4
    },
    'sim_vol_def': {
        'periodicity': 3,
        'volume_tau': 1000,
        'volume_nhc_tau': 1000},
    'sim_class_PE_def': {
        'shift_inter_pe': 'swit',
        'inter_spline_pts': 5000,
        'ewald_alpha': 10,
        'ewald_kmax': 13,
        'ewald_pme_opt': 'on',
        'ewald_kmax_pme': 22,
        'ewald_interp_pme': 8,
        'ewald_respa_pme_opt': 'off',
        'inter_PE_calc_freq': 1},
    'sim_nhc_def': {
        'atm_nhc_tau_def': 20.0,
        'atm_nhc_len': 4,
        'respa_steps_nhc': 4,
        'yosh_steps_nhc': 3,
        'respa_xi_opt': 1},
    'sim_write_def': {
        'sim_name': 'water.input-all',
        'write_screen_freq': write_freq_screen,
        'instant_file': 'water.iavg',
        'write_inst_freq': 100000000,
        'atm_pos_file': 'water.confp',
        'write_pos_freq': write_freq,
        'path_cent_file': 'water-centroid.confp',
        'path_cent_freq': write_freq,
        'atm_vel_file': 'water.confv',
        'write_vel_freq': 100000000,
        'atm_force_file': 'water.conff',
        'write_force_freq': 100000000,
        'out_restart_file': 'water.restart',
        'write_dump_freq': n_step_eq,
        'in_restart_file': 'W512-bulk.initial',
        'mol_set_file': 'water.set',
        'conf_file_format': 'formatted'}}

#
# compose topology and force field data
#
# NOTE: execfile is Python 2 only; it injects the *_q_TIP4P_F variables
# used just below into this namespace.
execfile(fn_FF)
bond = bond_q_TIP4P_F
bend = bend_q_TIP4P_F
inter= inter_q_TIP4P_F
parm = parm_q_TIP4P_F

water_set = [
    ['molecule_def', {
        'mol_parm_file': 'water.parm',
        'mol_opt_nhc': 'global',
        'num_mol': nwater,
        'mol_index': 1,
        'mol_name': 'water'}],
    ['data_base_def', {
        'bond_file': 'water.bond',
        'bend_file': 'water.bend',
        'inter_file': 'water.inter'}]
]

#
# write equilibration simulation directory
#
# Filename -> contents mapping for the output directory.
data = {
    'water.input': input_PINY,
    'water.bend': bend,
    'water.bond': bond,
    'water.inter': inter,
    'W512-bulk.initial': initial,
    'water.parm': parm,
    'water.set': water_set}
piny.tools.write_input_directory(data, dir_out_eq)

#
# write production simulation directory
#
# Same inputs, but a longer run that restarts from the equilibrated state
# instead of the initial configuration file.
input_PINY['sim_gen_def']['num_time_step'] = n_step_prod
input_PINY['sim_gen_def']['restart_type'] = 'restart_all'
input_PINY['sim_write_def']['in_restart_file'] = os.path.join('..', dir_out_eq, 'water.restart')
del data['W512-bulk.initial']
piny.tools.write_input_directory(data, dir_out_prod)
|
yuhangwang/PINY
|
examples/q-TIP4P-F/build.py
|
Python
|
epl-1.0
| 3,923
|
#!/usr/bin/env python
# B a r a K u d a
#
# Prepare 2D maps (monthly) that will later become a GIF animation!
# NEMO output and observations needed
#
# L. Brodeau, november 2016
import sys
import os
import numpy as nmp
from netCDF4 import Dataset
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import datetime
import barakuda_colmap as bcm
import barakuda_tool as bt
# Renders one PNG map per time record of an IFS field, masking land, for
# later assembly into an animation.
year_ref_ini = 1990  # records are interpreted as day-of-year of this year

#CTATM = 'T255'
CTATM = 'T1279'

# Sub-domain (i1:i2, j1:j2) of the atmosphere grid to extract.
if CTATM == 'T255':
    # South Greenland:
    #i1 = 412; i2 =486
    #j1 = 22 ; j2 = 56
    # NAtl:
    i1 = 385 ; i2= 540
    j1 = 6 ; j2 = 84
    #Global T255:
    #i1 = 0 ; i2 =511
    #j1 = 0 ; j2 = 255
elif CTATM == 'T1279':
    #
    #Global:
    i1 = 0 ; i2 = 2559+1
    j1 = 0 ; j2 = 1279+1
    # Natl:
    ##i1 = 1849 ; i2 = 2525
    ##j1 = 97 ; j2 = 508
    ##i1 = 1960 ; i2 = 2550; #2680
    ##i1 = 1849 ; i2 = 2525
    ##j1 = 97 ; j2 = 508
    #i1 = 2000 ; i2 = 2590
    #j1 = 0 ; j2 = 519
else:
    print 'UNKNOW ATMOSPHERE RESOLUTION!'; sys.exit(0)

fig_type='png'

# Command-line arguments: NetCDF input file, variable name, land-sea mask.
narg = len(sys.argv)
if narg < 4: print 'Usage: '+sys.argv[0]+' <file> <variable> <LSM_file>'; sys.exit(0)
cf_in = sys.argv[1] ; cv_in=sys.argv[2] ; cf_lsm=sys.argv[3]

lsst = False ; lshf = False
# NOTE(review): lt2m is only assigned when cv_in == 'T2M'; for any other
# variable the "if lt2m:" test below raises NameError -- an "lt2m = False"
# initialisation appears to be missing here.
if cv_in == 'T2M': lt2m = True
if cv_in == 'SSTK': lsst = True
if cv_in == 'SNHF': lshf = True

# Per-variable plot settings: colour range/step, palette, title, units,
# and how many colourbar ticks to skip between labels.
if lt2m:
    #tmin=-16. ; tmax=28. ; dt = 1.
    tmin=-2. ; tmax=28. ; dt = 1.
    cpal = 'ncview_nrl'
    #cpal = 'jaisnd'
    #cpal = '3gauss'
    #cpal = 'rainbow2_cmyk'
    #cpal = 'rainbow'
    #cpal = 'rnb2'
    #cpal = 'jaisnc'
    #cpal = 'jaisnb'
    cfield = 'T2M'
    cunit = r'$^{\circ}C$'
    cb_jump = 2

if lsst:
    tmin=-20. ; tmax=12. ; dt = 1.
    cpal = 'sstnw'
    cfield = 'SST'
    cunit = r'$Boo$'
    cb_jump = 2

if lshf:
    tmin=-1200. ; tmax=400. ; dt = 25.
    #cpal = 'rainbow'
    cpal = 'ncview_nrl'
    cfield = 'Net Heat Flux'
    cunit = r'$W/m^2$'
    cb_jump = 4

clsm = 'LSM'

# Need to know dimension:
bt.chck4f(cf_lsm)
id_lsm = Dataset(cf_lsm)
vlon = id_lsm.variables['lon'][:]
vlat = id_lsm.variables['lat'][:]
id_lsm.close()
Ni0 = len(vlon)
Nj0 = len(vlat)
print '\n Dimension of global domain:', Ni0, Nj0
imax=Ni0+1

Ni = i2-i1
Nj = j2-j1

LSM = nmp.zeros((Nj,Ni), dtype=nmp.float)
XIN = nmp.zeros((Nj,Ni))

id_lsm = Dataset(cf_lsm)
# If the eastern bound wraps past the end of the global grid, read in two
# pieces and stitch them across the periodic seam.
if i2 >= imax:
    print ' i2 > imax !!! => ', i2, '>', imax
    Xall = id_lsm.variables[clsm][0,j1:j2,:]
    LSM[:,0:imax-i1] = Xall[:,i1-1:imax]
    ii=imax-i1
    LSM[:,ii:Ni] = Xall[:,0:i2-imax]
    del Xall
else:
    LSM[:,:] = id_lsm.variables[clsm][0,j1:j2,i1:i2]
id_lsm.close()

[ nj , ni ] = nmp.shape(LSM)

# Ocean points (mask < 0.5) become NaN so the land overlay is transparent
# over the ocean; flip vertically to match imshow's row order.
idx_ocean = nmp.where(LSM[:,:] < 0.5)
LSM[idx_ocean] = nmp.nan
LSM = nmp.flipud(LSM)

params = { 'font.family':'Ubuntu',
           'font.size': int(12),
           'legend.fontsize': int(12),
           'xtick.labelsize': int(12),
           'ytick.labelsize': int(12),
           'axes.labelsize': int(12) }
mpl.rcParams.update(params)

cfont_clb = { 'fontname':'Arial', 'fontweight':'normal', 'fontsize':13 }
cfont_title = { 'fontname':'Ubuntu Mono', 'fontweight':'normal', 'fontsize':18 }
cfont_mail = { 'fontname':'Times New Roman', 'fontweight':'normal', 'fontstyle':'italic', 'fontsize':9, 'color':'0.5' }

# Pal_Sst:
pal_fld = bcm.chose_colmap(cpal)
norm_fld = colors.Normalize(vmin = tmin, vmax = tmax, clip = False)

pal_lsm = bcm.chose_colmap('blk')
norm_lsm = colors.Normalize(vmin = 0, vmax = 1, clip = False)

vc_fld = nmp.arange(tmin, tmax + dt, dt)

pfin = nmp.zeros((nj,ni))

# Number of time records in the input file.
bt.chck4f(cf_in)
id_in = Dataset(cf_in)
vtime = id_in.variables['time'][:]
id_in.close()
del id_in
Nt = len(vtime)

# Size of the figure:
rat_Nj_Ni = float(Nj)/float(Ni) + 0.12
rh = 7.5
rw = rh/rat_Nj_Ni
FSZ = ( rw , rh )
rcorr = rat_Nj_Ni/(float(Nj0)/float(Ni0))
print ' rcorr => ', rcorr

# One figure per time record.
for jt in range(Nt):

    print '\n *** Reading record # '+str(jt+1)+' of '+cv_in+' in '+cf_in
    id_in = Dataset(cf_in)
    # Same periodic-seam stitching as for the mask above.
    if i2 >= imax:
        print ' i2 = ', i2
        Xall = id_in.variables[cv_in][jt,j1:j2,:]
        XIN[:,0:imax-i1] = Xall[:,i1-1:imax]
        ii=imax-i1
        XIN[:,ii:Ni] = Xall[:,0:i2-imax]
        del Xall
    else:
        XIN[:,:] = id_in.variables[cv_in][jt,j1:j2,i1:i2]
    id_in.close()
    del id_in

    # Kelvin -> Celsius for temperature-like fields.
    if lsst or lt2m: XIN[:,:] = XIN[:,:] - 273.15

    # Build a calendar date label from the day-of-year record index.
    ct = '%3.3i'%(jt+1)
    cd = str(datetime.datetime.strptime(str(year_ref_ini)+' '+ct, '%Y %j'))
    cdate = cd[:10] ; print ' *** cdate :', cdate

    cfig = 'figs/'+cv_in+'_IFS'+'_d'+ct+'.'+fig_type
    fig = plt.figure(num = 1, figsize=FSZ, dpi=None, facecolor='w', edgecolor='k')
    ax = plt.axes([0.055, 0.05, 0.9, 1.], axisbg = 'k')
    cf = plt.imshow(nmp.flipud(XIN), cmap = pal_fld, norm = norm_fld)
    plt.axis([ 0, ni, 0, nj])

    # Mask
    cm = plt.imshow(LSM, cmap = pal_lsm, norm = norm_lsm)
    print ' LSM stuff...'

    plt.title('IFS: '+cfield+', coupled ORCA12-'+CTATM+', '+cdate, **cfont_title)

    # Horizontal colourbar in its own axes below the map.
    ax2 = plt.axes([0.04, 0.08, 0.93, 0.025])
    clb = mpl.colorbar.ColorbarBase(ax2, ticks=vc_fld, cmap=pal_fld, norm=norm_fld, orientation='horizontal', extend='both')
    #clb = plt.colorbar(cf, ticks=vc_fld, orientation='horizontal', drawedges=False, pad=0.07, shrink=1., aspect=40)
    # Label only every cb_jump-th tick to keep the bar readable.
    cb_labs = [] ; cpt = 0
    for rr in vc_fld:
        if cpt % cb_jump == 0:
            cb_labs.append(str(int(rr)))
        else:
            cb_labs.append(' ')
        cpt = cpt + 1
    clb.ax.set_xticklabels(cb_labs)
    clb.set_label(cunit, **cfont_clb)
    del cf

    ax.annotate('laurent.brodeau@bsc.es', xy=(1, 4), xytext=(480, -85), **cfont_mail)

    plt.savefig(cfig, dpi=160, orientation='portrait', transparent=False)
    print cfig+' created!\n'
    plt.close(1)

    del fig, ax, clb, cm
|
brodeau/barakuda
|
python/exec/movie_square_zoom_IFS.py
|
Python
|
gpl-2.0
| 5,894
|
# Copyright © 2017 Red Hat, Inc. and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This package contains Bodhi's database migrations."""
|
Conan-Kudo/bodhi
|
bodhi/server/migrations/versions/__init__.py
|
Python
|
gpl-2.0
| 843
|
#==============================================================================
# facade.py
# Main gandalf library front-end when invoking gandalf from within python.
#
# This file is part of GANDALF :
# Graphical Astrophysics code for N-body Dynamics And Lagrangian Fluids
# https://github.com/gandalfcode/gandalf
# Contact : gandalfcode@gmail.com
#
# Copyright (C) 2013 D. A. Hubber, G. Rosotti
#
# GANDALF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# GANDALF is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License (http://www.gnu.org/licenses) for more details.
#==============================================================================
import __main__
import atexit
import time
import types
import defaults
from multiprocessing import Manager, Queue, Event
from plotting import PlottingProcess
from gandalf.analysis.SimBuffer import SimBuffer, BufferException
import subprocess
manager = Manager()
#TODO: in all the Python code, raise proper exceptions rather than a generic Exception
#TODO: the tests should not fail
#------------------------------------------------------------------------------
class Singletons_master:
    '''Container class for singletons object. They are:
    queue : Queue for sending commands to the plotting process
    commands : List of the commands shared with the plotting process.
        Caution: if you modify a command, you must reassign it in the
        list to make the changes propagate to the other process
    completedqueue : Queue used from the plotting process to signal the completion
        of a command
    globallimits : Dict that for each quantity gives the limits
    free : Event -- NOTE(review): presumably signals that the plotting
        process is free/idle; confirm against the plotting module
    '''
    queue = Queue()
    commands = manager.list()
    completedqueue = Queue()
    globallimits = manager.dict()
    free = Event()
class Singletons_serial(Singletons_master):
    """Serial flavour: place_command executes the command immediately in the
    current process instead of handing it off to a plotting subprocess."""
    @staticmethod
    def place_command(objects):
        Singletons.queue.put(objects)
        # Immediately drain the queue and run the command in-process.
        # NOTE(review): `plotting` is resolved from module globals at call
        # time and is not defined in this part of the file -- confirm it is
        # created before the first command is placed.
        command, data = Singletons.queue.get()
        command.processCommand(plotting, data)
class Singletons_parallel(Singletons_master):
    """Parallel flavour: place_command only enqueues; the separate plotting
    process consumes the queue asynchronously."""
    @staticmethod
    def place_command(objects):
        Singletons.queue.put(objects)
# Pick the command-dispatch strategy once, at import time, from the
# configured default in the defaults module.
if defaults.parallel:
    Singletons=Singletons_parallel
else:
    Singletons=Singletons_serial
import commandsource as Commands
from data_fetcher import CreateUserQuantity, CreateTimeData, UserQuantity
from data_fetcher import _KnownQuantities as KnownQuantities
import signal
from time import sleep
from statistics import structure_function
import subprocess
import tempfile
import glob
import os
#figure out if we are in interactive mode
# A script run from a file defines __main__.__file__; an interactive
# interpreter session does not, so the attribute access raises
# AttributeError there.
try:
    __main__.__file__
    interactive=False
except AttributeError:
    interactive=True
class Async_sim_fetcher:
    '''Object returned by run_async, to be used to get information on the status
    of the simulation.

    Methods
    poll
        Return True if the simulation has finished, False otherwise
    wait
        Block execution until the simulation has finished, and subsequently
        load into memory the snapshots produced during the run.
    read_snaps
        Load into memory the snapshots produced. You need to call this function
        only poll reports that the simulation has finished; if using wait
        this is done automatically. An Exception will be raised if attempting
        to call this function for a simulation that has not finished yet.
    '''
    def __init__(self,sim):
        self._sim=sim
        self._finished=False

    def read_snaps(self):
        # Subclasses may not support polling (e.g. MPI < 3); in that case
        # fall back on the _finished flag set by wait().
        try:
            finished=self.poll()
        except NotImplementedError:
            finished=self._finished
        if not finished:
            raise Exception("The simulation has not finished yet, you can't read the snapshots.")
        SimBuffer.load_snapshots(self._sim)
class MPI_Popen(Async_sim_fetcher):
    """Tracks an MPI-launched simulation.

    Completion is detected through a barrier on *comm*: non-blocking
    (Ibarrier) when the library implements MPI-3, blocking otherwise.
    """
    def __init__(self,sim,comm):
        Async_sim_fetcher.__init__(self, sim)
        self._comm=comm
        self._barrier=None

    def wait(self):
        # MPI-2 and older: only a blocking barrier is available, so record
        # completion in _finished for read_snaps()'s fallback path.
        if self._major_version()<3:
            self._comm.Barrier()
            self._finished=True
        else:
            # Lazily create the non-blocking barrier, then wait on it.
            if self._barrier is None:
                self._barrier=self._comm.Ibarrier()
            self._barrier.wait()
        self.read_snaps()
        return 0

    def poll(self):
        if self._major_version()<3:
            raise NotImplementedError("This MPI version does not support polling. You need to wait!")
        if self._barrier is None:
            self._barrier=self._comm.Ibarrier()
        # test() returns (completed_flag, ...); the flag alone is the answer.
        test=self._barrier.test()[0]
        return test

    def _major_version(self):
        # Major version of the MPI standard implemented by the runtime.
        from mpi4py import MPI
        major_vers=int(MPI.Get_version()[0])
        return major_vers
class proxy_Popen(Async_sim_fetcher):
    """Adapter exposing a subprocess-style handle through the
    Async_sim_fetcher interface."""

    def __init__(self, sim, p):
        Async_sim_fetcher.__init__(self, sim)
        self._p = p

    def wait(self):
        """Block until the child process exits, load its snapshots, and
        return the child's exit code."""
        exit_code = self._p.wait()
        self.read_snaps()
        return exit_code

    def poll(self):
        """Return True when the child process has terminated."""
        # Popen.poll() yields None while the child is still running.
        return self._p.poll() is not None
#TODO: add function for resizing (programmatically) the figure
#------------------------------------------------------------------------------
def handle(e):
    """Report the error *e* appropriately for the current mode.

    In an interactive session the error is printed so the user can carry
    on; in script mode it is re-raised so an uncaught exception stops the
    script's execution.
    """
    if interactive:
        print(str(e))
    else:
        raise e
#------------------------------------------------------------------------------
def loadsim(run_id, fileformat=None, buffer_flag='cache'):
    '''
    Given the run_id of a simulation, reads it from the disk.
    Returns the newly created simulation object.

    Arguments:
        run_id(str): Simulation run identification string.

    Keyword Args:
        fileformat: Format of all snapshot files of simulation.
        buffer_flag: Record snapshot data in simulation buffer.
    '''
    # Delegate the loading to the shared buffer, then hand back the
    # simulation object it now considers current.
    SimBuffer.loadsim(run_id, fileformat=fileformat, buffer_flag=buffer_flag)
    return SimBuffer.get_current_sim()
class Plotting:
    """Plotting-side state: the matplotlib handle plus the bookkeeping of
    commands and figures shared with the main process."""
    def __init__(self):
        self.lastid=0
        # matplotlib is imported lazily, only when plotting is actually used.
        import matplotlib.pyplot as plt
        self.plt=plt
        self.axesimages = {}
        self.commands=Singletons.commands
        self.commandsfigures = {}
        self.quantitiesfigures = {}
        self.globallimits = Singletons.globallimits

    def command_in_list (self, id):
        # True if a command with this id is still in the shared list.
        for command in Singletons.commands:
            if command.id==id:
                return True
        return False
#------------------------------------------------------------------------------
def plot(x, y, type="default", snap="current", sim="current",
         overplot=False, autoscale=False, xunit="default", yunit="default",
         xaxis="linear", yaxis="linear", **kwargs):
    '''Scatter-plot particle data, opening a plotting window if needed.

    Args:
        x (str): Quantity on the x-axis.
        y (str): Quantity on the y-axis.
    Keyword Args:
        type: particle type to plot (e.g. 'star' or 'sph'); 'all' plots
            every species in turn, overplotting after the first.
        snap: snapshot number to plot (default 'current').
        sim: simulation number to plot (default 'current').
        overplot (bool): overplot instead of replacing the previous plot.
        autoscale: True, 'x' or 'y' for automatic limits on the given
            axes; on an axis without autoscaling, global limits are used
            when defined for the quantity.
        xunit (str): unit for the x-axis quantity.
        yunit (str): unit for the y-axis quantity.
        **kwargs: forwarded to matplotlib.
    Return:
        The plotted data; x_data and y_data hold the two arrays.
    '''
    simno = get_sim_no(sim)
    overplot = to_bool(overplot)
    if type == "all":
        # Recurse once per species; everything after the first overplots.
        simobj = SimBuffer.get_sim_no(simno)
        snapobj = SimBuffer.get_snapshot_extended(simobj, snap)
        for ispecies in range(snapobj.GetNTypes()):
            plot(x, y, snapobj.GetSpecies(ispecies), snap, simno,
                 (overplot or ispecies > 0), autoscale, xunit, yunit,
                 xaxis, yaxis, **kwargs)
        return
    command = Commands.ParticlePlotCommand(x, y, type, snap, simno, overplot,
                                           autoscale, xunit, yunit,
                                           xaxis, yaxis, **kwargs)
    data = command.prepareData(Singletons.globallimits)
    Singletons.place_command([command, data])
    sleep(0.001)
    return data
#------------------------------------------------------------------------------
def time_plot(x, y, sim="current", overplot=False, autoscale=False,
              xunit="default", yunit="default", xaxis="linear",
              yaxis="linear", idx=None, idy=None, id=None,
              typex="default", typey="default", type="default", **kwargs):
    '''Plot one time-evolved quantity against another, opening a plotting
    window if needed.

    Each quantity is first looked up among those defined as functions of
    time; failing that it is interpreted as a per-particle quantity, in
    which case an id (idx/idy, or id for both axes) must be supplied.

    Args:
        x (str): Quantity on the x-axis.
        y (str): Quantity on the y-axis.
    Keyword Args:
        sim: simulation number to plot (default 'current').
        overplot (bool): overplot instead of replacing the previous plot.
        autoscale: True, 'x' or 'y' for automatic limits; on an axis
            without autoscaling, global limits are used when defined.
        xunit (str): unit for the x-axis quantity.
        yunit (str): unit for the y-axis quantity.
        idx (int): particle id for the x-axis quantity (ignored when the
            quantity does not depend on an id).
        idy (int): as idx, for the y-axis.
        id (int): particle id for both axes; overrides idx and idy.
        typex (str): particle type for the x-axis quantity (ignored when
            the quantity does not depend on it).
        typey (str): as typex, for the y-axis.
        type (str): particle type for both axes; overrides typex and typey.
    Return:
        The plotted data; x_data and y_data hold the two arrays.
    '''
    command = Commands.TimePlot(x, y, get_sim_no(sim), to_bool(overplot),
                                autoscale, xunit, yunit, xaxis, yaxis,
                                idx, idy, id, typex, typey, type, **kwargs)
    data = command.prepareData(Singletons.globallimits)
    Singletons.place_command([command, data])
    return data
#------------------------------------------------------------------------------
def render(x, y, render, snap="current", sim="current", overplot=False,
           autoscale=False, autoscalerender=False, coordlimits=None,
           zslice=None, xunit="default", yunit="default",
           renderunit="default", res=64, interpolation='nearest', lognorm=False,
           type='sph', **kwargs):
    '''Produce a rendered plot of the quantity *render* over the x-y plane:
    column-integrated by default, or a slice when *zslice* is given.

    Args:
        x (str): Quantity on the x-axis.
        y (str): Quantity on the y-axis.
        render (str): Quantity to be rendered.
    Keyword Args:
        snap: snapshot number to plot (default 'current').
        sim: simulation number to plot (default 'current').
        overplot (bool): overplot instead of replacing the previous plot.
        autoscale: True, 'x' or 'y' for automatic coordinate limits; on an
            axis without autoscaling, global limits are used when defined.
        autoscalerender: as autoscale, for the rendered quantity.
        coordlimits: explicit (xmin, xmax, ymin, ymax); takes precedence
            over the global settings, which take precedence over the data
            min/max.
        zslice (float): z coordinate of the slice; None (default) gives a
            column-integrated plot.
        xunit (str): unit for the x-axis quantity.
        yunit (str): unit for the y-axis quantity.
        renderunit (str): unit for the rendered quantity.
        res: resolution, either an int (same on both axes) or an
            (xres, yres) pair.
        interpolation: matplotlib interpolation mode; 'nearest' (default)
            shows the raw grid pixels, 'bilinear'/'bicubic' smooth it.
        lognorm (bool): use a logarithmic colour scale; customise limits
            with the vmin/vmax kwargs, which go to matplotlib.
        type (str): particle type used for the rendering (default 'sph').
        **kwargs: forwarded to matplotlib.
    Return:
        The plotted data; render_data holds the 2d image array.
    '''
    if zslice is not None:
        zslice = float(zslice)
    simno = get_sim_no(sim)
    overplot = to_bool(overplot)
    autoscalerender = to_bool(autoscalerender)
    # Command-line callers hand us strings; normalise limits and resolution.
    if coordlimits is not None and isinstance(coordlimits, types.StringTypes):
        coordlimits = to_list(coordlimits, float)
    if isinstance(res, types.StringTypes):
        res = to_list(res, int) if res[0] in ('[', '(') else int(res)
    command = Commands.RenderPlotCommand(x, y, render, snap, simno, overplot,
                                         autoscale, autoscalerender,
                                         coordlimits, zslice, xunit, yunit,
                                         renderunit, res, interpolation,
                                         lognorm, type, **kwargs)
    data = command.prepareData(Singletons.globallimits)
    Singletons.place_command([command, data])
    return data
#------------------------------------------------------------------------------
def renderslice(x, y, renderq, zslice, **kwargs):
    '''Convenience wrapper: slice rendering, i.e. render() with zslice set.

    Args:
        x (str): Quantity on the x-axis.
        y (str): Quantity on the y-axis.
        renderq (str): Quantity to be rendered.
        zslice (float): z-coordinate of the slice.
    Keyword Args:
        See documentation of the render function.
    '''
    return render(x, y, renderq, zslice=zslice, **kwargs)
#------------------------------------------------------------------------------
def addrenderslice(x, y, renderq, zslice, **kwargs):
    '''Overplot a rendered slice: renderslice() with overplot=True, and with
    autoscale defaulting to False so the existing limits are preserved.

    Args:
        x (str): Quantity on the x-axis.
        y (str): Quantity on the y-axis.
        renderq (str): Quantity to be rendered.
        zslice (float): z-coordinate of the slice.
    Keyword Args:
        See documentation of the render function.
    '''
    kwargs.setdefault('autoscale', False)
    return render(x, y, renderq, zslice=zslice, overplot=True, **kwargs)
#------------------------------------------------------------------------------
def addrender(x, y, renderq, **kwargs):
    '''Overplot a rendered quantity: render() with overplot=True, and with
    autoscale defaulting to False so the existing limits are preserved.

    Args:
        x (str): Quantity on the x-axis.
        y (str): Quantity on the y-axis.
        renderq (str): Quantity to be rendered.
    Keyword Args:
        See documentation of the render function.
    '''
    kwargs.setdefault('autoscale', False)
    return render(x, y, renderq, overplot=True, **kwargs)
#------------------------------------------------------------------------------
def make_movie(filename, snapshots='all', window_no=0, fps=24):
    '''Generate a movie from the current window, one frame per snapshot of
    the current simulation.  Requires ffmpeg to be on the PATH.

    Args:
        filename (str): filename (with extension, e.g. mp4) of the movie
            that will be created.
        snapshots (str): currently only 'all' is honoured.
        window_no (int): currently not used.
        fps (int): frames per second.
    '''
    # Remove stale temporary frames left over from a previous (aborted) run.
    tmpfilelist = glob.glob('tmp.?????.png')
    for file in tmpfilelist:
        os.remove(file)
    sim = SimBuffer.get_current_sim()
    # Render each snapshot of the current simulation to a numbered png.
    # (The unused 'nframes' local of the original has been removed.)
    if snapshots == 'all':
        for isnap in range(len(sim.snapshots)):
            snap(isnap)
            tmpfile = 'tmp.' + str(isnap).zfill(5) + '.png'
            savefig(tmpfile)
    # Wait until all plotting processes have finished before making mp4 file
    if defaults.parallel:
        Singletons.free.wait()
    # Now join all temporary files together with ffmpeg
    subprocess.call(["ffmpeg","-y","-r",str(fps),"-i", "tmp.%05d.png", \
                     "-vcodec","mpeg4", "-qscale","5", "-r", str(fps), \
                     filename])
    # Now remove all temporary files just created to make movie
    tmpfilelist = glob.glob('tmp.?????.png')
    for file in tmpfilelist:
        os.remove(file)
#------------------------------------------------------------------------------
def limit(quantity, min=None, max=None, auto=False,
window='current', subfigure='current'):
'''Set plot limits. Quantity is the quantity to limit.
Args:
quantity (str): Set limits of this variable.
Keyword Args:
min (float): Minimum value of variable range.
max (float): Maximum value of variable range.
auto (bool): If auto is set to True, then the limits for that quantity are
set automatically. Otherwise, use the one given by max and min.
window (str): By default only the current subplot of the current window is affected.
If this parameter is set to 'all', all the current windows are affected.
If this parameter is set to 'global', then also future plots are affected.
subfigure (str): Similarly to window, by default only the current subplot is affected
by this command. If this parameter is set to 'all' then all the subfigures
in the current window are affected.
'''
if min is not None:
min = float(min)
if max is not None:
max = float(max)
if not auto:
auto=to_bool(auto)
if window=='all' and subfigure=='current':
subfigure=='all'
command = Commands.LimitCommand(quantity, min, max, auto, window, subfigure)
Singletons.place_command([command,None])
if window=='global':
okflag=Singletons.completedqueue.get()
print okflag
#------------------------------------------------------------------------------
def addplot(x, y, **kwargs):
    '''Overplot x vs y on the existing plot: plot() with overplot=True, and
    with autoscale defaulting to False so the existing limits are preserved.

    Args:
        x (str): Quantity on the x-axis.
        y (str): Quantity on the y-axis.
    Keyword Args:
        See documentation of the plot function.
    '''
    kwargs.setdefault('autoscale', False)
    return plot(x, y, overplot=True, **kwargs)
#------------------------------------------------------------------------------
def next():
    '''Advance the current simulation to its next snapshot.
    Return the new snapshot, or None if the call failed.'''
    try:
        return snap(SimBuffer.get_no_next_snapshot())
    except BufferException as e:
        handle(e)
#------------------------------------------------------------------------------
def previous():
    '''Step the current simulation back to its previous snapshot.
    Return the new snapshot, or None if the call failed.'''
    try:
        return snap(SimBuffer.get_no_previous_snapshot())
    except BufferException as e:
        handle(e)
#------------------------------------------------------------------------------
def snap(no):
    '''Jump to snapshot *no* of the current simulation.  Standard numpy
    index notation is accepted (e.g. -1 is the last snapshot).

    Args:
        no (int): Snapshot number
    Returns:
        The snapshot object, or None if the call failed.
    '''
    snapshot = None
    try:
        snapshot = SimBuffer.set_current_snapshot_number(int(no))
    except BufferException as e:
        handle(e)
    if snapshot is not None:
        # Refresh every plot that tracks the "current" snapshot.
        update("current")
    return snapshot
#------------------------------------------------------------------------------
def window(no=None):
    '''Change the current plotting window to the given number, recreating
    the window if it does not exist.

    Args:
        no (int): Window number
    '''
    target = None if no is None else int(no)
    Singletons.place_command([Commands.WindowCommand(target), None])
#------------------------------------------------------------------------------
def subfigure(nx, ny, current):
    '''Create (or activate) a subplot in the current window.

    Args:
        nx (int): x-grid size.
        ny (int): y-grid size.
        current (int): id of the active sub-figure; if the sub-figure
            already exists this just switches to it.
    '''
    command = Commands.SubfigureCommand(int(nx), int(ny), int(current))
    Singletons.place_command([command, None])
#------------------------------------------------------------------------------
def newsim(paramfile=None, ndim=None, sim=None):
    '''Create a new simulation object. Need to specify either the parameter
    file, or the number of dimensions and the simulation type. Note that it is not
    possible to change the number of dimensions afterwards or simulation type
    afterwards.

    Keyword Args:
        paramfile (str): path of the parameter file to build the run from.
        ndim (int): number of spatial dimensions (used when no paramfile).
        sim: simulation type (forwarded to the buffer as simtype).
    Returns:
        The newly created simulation object.
    '''
    return SimBuffer.newsim(paramfile=paramfile, ndim=ndim, simtype=sim)
#------------------------------------------------------------------------------
def setupsim():
    '''Set up the current simulation object and record its parameters to
    file.  After calling this function no parameter change is possible.
    '''
    current = SimBuffer.get_current_sim()
    current.SetupSimulation()
    current.simparams.RecordParametersToFile()
#------------------------------------------------------------------------------
def run(no=None):
    '''Run a simulation. If no argument is given, run the current one;
    otherwise queries the buffer for the given simulation number.
    If the simulation has not been setup, does it before running.

    Keyword Args:
        no(int): Simulation number
    '''
    #gets the correct simulation object from the buffer
    try:
        if no is None:
            sim = SimBuffer.get_current_sim()
        else:
            no = int(no)
            sim = SimBuffer.get_sim_no(no)
    except BufferError as e:
        # NOTE(review): sibling functions catch BufferException, not the
        # builtin BufferError - verify which one the buffer raises.  Also,
        # if handle() returns (interactive mode), `sim` is unbound below.
        handle(e)
    #setup the simulation
    if not sim.setup:
        sim.SetupSimulation()
        sim.simparams.RecordParametersToFile()
    SimBuffer.load_live_snapshot(sim)
    # Advance the run one interactive chunk at a time, registering any new
    # snapshots and refreshing the "live" plots after each chunk.
    while sim.t < sim.tend and sim.Nsteps < sim.Nstepsmax:
        #TODO: maybe some of these operations could be done in another thread, so that the computation is
        #not slowed down when compared to the stand-alone c++ executable
        #But need to think carefully, because of the GIL... (??)
        snap_list = sim.InteractiveRun()
        for snap in snap_list:
            SimBuffer.add_snapshot(snap, sim)
        SimBuffer.load_live_snapshot(sim)
        update("live")
def run_async(no=None,maxprocs=4):
    '''Run the current simulation in async mode, i.e. in the background. Return
    an Async_sim_fetcher object that can be used the query the status of the
    simulation (see its documentation for more details). The results will NOT
    be available until the user calls wait on the returned object, or, if the
    simulation has already finished (which can be checked by calling poll),
    calling load_snaps.

    Keyword Args:
        no(int): Simulation number
        maxprocs (int): if compiled with MPI, specifies how many processes
            to use
    '''
    #get the correct simulation object from the buffer
    try:
        if no is None:
            sim = SimBuffer.get_current_sim()
        else:
            no = int(no)
            sim = SimBuffer.get_sim_no(no)
    except BufferError as e:
        # NOTE(review): see run() - BufferException may have been intended.
        handle(e)
    #setup the simulation
    if not sim.setup:
        sim.SetupSimulation()
        sim.simparams.RecordParametersToFile()
    # The background run is performed by the stand-alone gandalf binary
    # (located relative to this file), pointed at the run's parameter file.
    param_path=sim.GetParam('run_id')+'.param'
    dir_path = os.path.dirname(os.path.realpath(__file__))
    gandalf_path=os.path.join(dir_path,'../bin/gandalf')
    if sim.MPI:
        from mpi4py import MPI
        # Spawn maxprocs MPI ranks running the binary; the returned
        # intercommunicator lets the fetcher wait for completion.
        comm=MPI.COMM_SELF.Spawn(gandalf_path, param_path, maxprocs=maxprocs)
        async_fetcher=MPI_Popen(sim,comm)
        return async_fetcher
    else:
        print param_path
        p=subprocess.Popen([gandalf_path,param_path])
        async_fetcher=proxy_Popen(sim,p)
        return async_fetcher
#------------------------------------------------------------------------------
def block(message="Press enter to quit..."):
    '''Stops the execution flow until the user presses 'enter'.
    Useful in scripts, allowing to see a plot (which otherwise gets closed
    as soon as the execution flow reaches the end of the script)

    Keyword Args:
        message (str): text to print before pausing
    '''
    print message
    raw_input()
#------------------------------------------------------------------------------
def update(type=None):
    '''Refresh the queued plots: with type=None every plot is redrawn,
    otherwise only plots whose command tracks the given snapshot kind
    (e.g. "current" or "live").  User code should never need to call this
    directly - the plotting functions call it themselves; if you do need
    it, that is probably a bug worth reporting.
    '''
    for command in Singletons.commands:
        if type is None:
            refresh = True
        else:
            try:
                refresh = (command.snap == type)
            except AttributeError:
                # Commands without a snap attribute never auto-refresh.
                refresh = False
        if refresh:
            data = command.prepareData(Singletons.globallimits)
            Singletons.place_command([command, data])
#------------------------------------------------------------------------------
def savefig(name):
    '''Save the current figure under the given name; matplotlib deduces the
    file type from the extension.

    Args:
        name (str): filename (including extension)
    '''
    Singletons.place_command([Commands.SaveFigCommand(name), None])
    time.sleep(1e-3)
#------------------------------------------------------------------------------
def switch_nongui():
    '''Switch the matplotlib backend to a non-interactive one.  Useful in
    scripts where no user interaction is required.
    '''
    Singletons.place_command([Commands.SwitchNonGui(), None])
    time.sleep(1e-3)
#------------------------------------------------------------------------------
def plotanalytical(x=None, y=None, ic="default", snap="current", sim="current",
                   overplot=True, autoscale=False, xunit="default",
                   yunit="default", time="snaptime"):
    '''Plot the analytical solution for the problem given by *ic*, when one
    is implemented; otherwise nothing is plotted.

    Keyword Args:
        x (str): Quantity on the x-axis.
        y (str): Quantity on the y-axis.
        ic: problem type whose analytical solution should be used.
        snap: snapshot number to plot (default 'current').
        sim: simulation number to plot (default 'current').
        overplot (bool): overplot instead of replacing the previous plot
            (default True).
        autoscale: True, 'x' or 'y' for automatic limits; on an axis
            without autoscaling, global limits are used when defined.
        xunit (str): unit for the x-axis quantity.
        yunit (str): unit for the y-axis quantity.
        time: time at which the analytical solution is evaluated; by
            default it is read from the simulation/snapshot.
    Return:
        The plotted data; x_data and y_data hold the two arrays.
    '''
    #TODO: figure out automatically the quantities to plot depending on current window
    command = Commands.AnalyticalPlotCommand(x, y, ic, snap, get_sim_no(sim),
                                             to_bool(overplot), autoscale,
                                             xunit, yunit)
    data = command.prepareData(Singletons.globallimits, time)
    Singletons.place_command([command, data])
    return data
#------------------------------------------------------------------------------
def rescale(quantity, unitname, window="current"):
'''Rescales the specified quantity in the specified window to the specified unit
Args:
quantity (str): Quantity to be rescaled.
unitname (str): Required unit for quantity.
Keyword args:
window : Window containing plot. Can be either the string "current" or
an integer specifying the window.
'''
command = Commands.RescaleCommand(quantity, unitname, window)
Singletons.place_command([command,None])
okflag = Singletons.completedqueue.get()
print okflag
update()
#------------------------------------------------------------------------------
def sims():
'''Print a list of the simulations to screen'''
print "These simulations are currently loaded into memory:"
for num, sim in enumerate(SimBuffer.simlist):
print str(num) + ' ' + sim.simparams.stringparams["run_id"]
#------------------------------------------------------------------------------
def snaps(simno):
'''For the given simulation number, print a list of all the snapshots
Args:
simno (int): Simulation number from which to print the snapshot list.
'''
simno = int(simno)
sim = SimBuffer.get_sim_no(simno)
print "The run_id of the requested simulation is " + sim.simparams.stringparams["run_id"]
print "These are the snapshots that we know about for this simulation:"
for num, snap in enumerate(sim.snapshots):
#TODO: snap.t is set correctly only the first time that the snapshot is read from the disc, should be fixed
print str(num) + ' ' + snap.filename + " " + str(snap.t)
try:
live = None
live = sim.live
except AttributeError:
pass
if live is not None:
print "In addition, there is a live snapshot in memory, at time " + str(live.t)
#------------------------------------------------------------------------------
def set_current_sim(simno):
    '''Make the simulation with the given number the current one.

    Args:
        simno (int): Simulation number
    Returns:
        The newly set current simulation
    '''
    return SimBuffer.set_current_sim_no(int(simno))
#------------------------------------------------------------------------------
def get_sim_no(sim):
    '''Resolve a simulation specifier to a simulation number.

    Args:
        sim: either the string "current" (resolved through the buffer) or
            anything convertible to int.
    Returns:
        The simulation number (int).
    '''
    if sim == "current":
        return SimBuffer.get_current_sim_no()
    return int(sim)
def get_data(quantity, snap="current", type="default", sim="current", unit="default"):
    '''Return the array with the data for the given quantity, scaled to the
    specified unit.

    Args:
        quantity (str): The quantity required.
    Keyword Args:
        type (str): The type of the particles (e.g. 'star'); 'all' is
            rejected because only a single array can be returned.
        snap: Number of the snapshot. Defaults to 'current'.
        sim: Number of the simulation. Defaults to 'current'.
        unit (str): Specifies the unit to use to return the data.
    Returns:
        A numpy array containing the requested data.
    '''
    simno = get_sim_no(sim)
    sim = SimBuffer.get_sim_no(simno)
    snapobject = SimBuffer.get_snapshot_extended(sim, snap)
    # (The original also called snapobject.GetNTypes() into an unused local;
    # that dead call has been removed.)
    if type == "all":
        raise Exception("You requested all particle types to get_data, but we can return only one array!")
    fetcher = UserQuantity(quantity)
    # fetch returns (unit info, raw data, scaling factor, label); only the
    # scaled data is returned to the caller.
    unitinfo, data, scaling, label = fetcher.fetch(type=type, snap=snapobject, unit=unit)
    return data*scaling
def get_render_data(x, y, quantity, sim="current", snap="current",
                    renderunit="default",
                    res=64, zslice=None, coordlimits=None):
    '''Return the rendered (gridded) data for the given quantity, scaled to
    the requested unit.  Useful for putting SPH data onto a regular grid.
    The options are a subset of those of the render function.

    Args:
        x (str): Quantity on the x-axis.
        y (str): Quantity on the y-axis.
        quantity (str): Quantity to render.
    Keyword Args:
        snap: snapshot number (default 'current').
        sim: simulation number (default 'current').
        renderunit: unit for the rendered quantity.
        res: resolution, an int or an (xres, yres) pair.
        zslice (float): z-coordinate of the slice; None (default) gives a
            column-integrated result.
        coordlimits: x/y limits; see the render documentation.
    Return:
        A numpy 2d array containing the rendered data.
    '''
    if zslice is not None:
        zslice = float(zslice)
    simno = get_sim_no(sim)
    # Normalise string-valued limits/resolution from command-line callers.
    if coordlimits is not None and isinstance(coordlimits, types.StringTypes):
        coordlimits = to_list(coordlimits, float)
    if isinstance(res, types.StringTypes):
        res = to_list(res, int) if res[0] in ('[', '(') else int(res)
    command = Commands.RenderPlotCommand(x, y, quantity, snap, simno, True,
                                         True, True,
                                         coordlimits, zslice, "default", "default",
                                         renderunit, res, "nearest")
    data = command.prepareData(Singletons.globallimits)
    return data.render_data
def get_analytical_data(x=None, y=None, ic="default", snap="current", sim="current",
                        xunit="default",
                        yunit="default", time="snaptime"):
    '''Return the data of the analytical solution (if one is implemented for
    the problem given by *ic*).

    Keyword Args:
        x (str): first quantity to return; normally a spatial coordinate.
        y (str): second quantity to return.
        snap: snapshot number (default 'current').
        sim: simulation number (default 'current').
        xunit (str): unit for the first quantity.
        yunit (str): unit for the second quantity.
        time: time at which the analytical solution is evaluated; by
            default it is read from the simulation/snapshot.
    Return:
        The requested data; x_data holds the first quantity and y_data the
        second.
    '''
    command = Commands.AnalyticalPlotCommand(x, y, ic, snap, get_sim_no(sim),
                                             True, True, xunit, yunit)
    return command.prepareData(Singletons.globallimits, time)
#------------------------------------------------------------------------------
def get_time_data(x, y, sim="current", overplot=False, autoscale=False,
                  xunit="default", yunit="default", xaxis="linear",
                  yaxis="linear", idx=None, idy=None, id=None,
                  typex="default", typey="default", type="default", **kwargs):
    '''Return the data of two quantities as evolved in time, one versus the
    other.  The quantities are resolved exactly as in time_plot: first among
    the time-series quantities, then as per-particle quantities (requiring
    an id).

    Args:
        x (str): Quantity on the x-axis.
        y (str): Quantity on the y-axis.
    Keyword Args:
        sim: simulation number (default 'current').
        overplot: accepted for symmetry with time_plot but has NO effect
            here (the command is always built for data retrieval only).
        autoscale: True, 'x' or 'y' for automatic limits; otherwise global
            limits are used when defined.
        xunit (str): unit for the x-axis quantity.
        yunit (str): unit for the y-axis quantity.
        idx (int): particle id for the x-axis quantity (ignored when the
            quantity does not depend on an id).
        idy (int): as idx, for the y-axis.
        id (int): particle id for both axes; overrides idx and idy.
        typex (str): particle type for the x-axis quantity.
        typey (str): as typex, for the y-axis.
        type (str): particle type for both axes; overrides typex and typey.
    Return:
        The data; x_data and y_data hold the two arrays.
    '''
    simno = get_sim_no(sim)
    # The original converted `overplot` with to_bool and then discarded the
    # result (the command is hard-wired to True); the dead conversion has
    # been removed.
    command = Commands.TimePlot(x, y, simno, True, autoscale,
                                xunit, yunit, xaxis, yaxis, idx, idy, id,
                                typex, typey, type, **kwargs)
    return command.prepareData(Singletons.globallimits)
#------------------------------------------------------------------------------
def to_list(str_variable, type):
    '''Parse a string such as "[1,2,3]" or "(1,2,3)" into a list, applying
    the callable *type* to each comma-separated element.

    Args:
        str_variable (str): bracketed, comma-separated list literal.
        type: conversion callable applied to every element (e.g. int, float).
    Returns:
        List of converted elements.
    Raises:
        ValueError: if the string is not wrapped in (...) or [...].
    '''
    parenthesis_open = ('[', '(')
    parenthesis_closed = (']', ')')
    if str_variable[0] not in parenthesis_open or str_variable[-1] not in parenthesis_closed:
        raise ValueError('What you passed cannot be parsed as a tuple')
    splitted = str_variable[1:-1].split(',')
    # Wrap in list() so the result is a real list on Python 3 as well,
    # where map() returns a lazy iterator.
    return list(map(type, splitted))
#------------------------------------------------------------------------------
def to_bool(value):
    '''Parse a string into a boolean: 'true'/'t'/'1' and 'false'/'f'/'0'
    (case-insensitive) are accepted.  Non-string input is passed to the
    builtin bool() (so the result is False only for falsy values).

    Raises:
        ValueError: if the string is not a recognised boolean literal.
    '''
    valid = {'true': True, 't': True, '1': True,
             'false': False, 'f': False, '0': False,
             }
    # types.StringTypes exists only on Python 2; fall back to (str,) so the
    # helper also works on Python 3 (where str and unicode are merged).
    string_types = getattr(types, 'StringTypes', (str,))
    if not isinstance(value, string_types):
        return bool(value)
    lower_value = value.lower()
    if lower_value in valid:
        return valid[lower_value]
    else:
        raise ValueError('invalid literal for boolean: "%s"' % value)
#------------------------------------------------------------------------------
def sigint(signum, frame):
    '''Signal handler (registered for SIGINT/SIGTERM/SIGSEGV): shut the
    background plotting process down cleanly before exiting.'''
    cleanup()
#------------------------------------------------------------------------------
def cleanup():
    '''Stop the background plotting process, wait for it to finish, and
    exit the interpreter.  Used both by the signal handler and as an
    atexit hook.'''
    Singletons.place_command(["STOP",None])
    print "Waiting for background processes to finish..."
    plottingprocess.join()
    # NOTE(review): local re-import, presumably because module globals may
    # already be torn down when this runs at interpreter exit - confirm.
    import sys
    sys.exit()
def ListFunctions():
'''List the available functions defined in facade'''
import gandalf_interpreter
toexcludefunctions=gandalf_interpreter.toexcludefunctions
functions = inspect.getmembers(facade, inspect.isfunction)
functions=filter(lambda function: function not in toexcludefunctions, functions)
print "The available functions in facade are: "
for function in functions:
print function.__name__
#------------------------------------------------------------------------------
def init():
    '''Start the plotting backend (a separate process when defaults.parallel
    is set, in-process otherwise) and register the derived particle
    quantities (spherical/cylindrical coordinates, velocities,
    accelerations, pressure, sound speed, temperature) and the standard
    time-series quantities.  Called once when the module is imported.'''
    if defaults.parallel:
        global plottingprocess
        plottingprocess = PlottingProcess(Singletons.queue, Singletons.commands, Singletons.completedqueue, Singletons.globallimits, Singletons.free)
        plottingprocess.start()
    else:
        global plotting
        plotting=Plotting()
    CreateUserQuantity('r','sqrt(x^2+y^2+z^2)',scaling_factor='r', label='$r$')
    CreateUserQuantity('R','sqrt(x^2+y^2)',scaling_factor='r', label='$R$')
    CreateUserQuantity('phi','arctan2(y,x)', label='$\\phi$')
    CreateUserQuantity('theta','arccos(z/r)', label='$\\theta$')
    CreateUserQuantity('vr','sin(theta)*cos(phi)*vx+sin(theta)*sin(phi)*vy+cos(theta)*vz',scaling_factor='v',label='$v_r$')
    CreateUserQuantity('vR','sin(theta)*cos(phi)*vx+sin(theta)*sin(phi)*vy',scaling_factor='v',label='$v_R$')
    CreateUserQuantity('vR2d','cos(phi)*vx+sin(phi)*vy',scaling_factor='v',label='$v_R$')
    CreateUserQuantity('vphi','cos(phi)*vy-sin(phi)*vx',scaling_factor='v',label='$v_\\phi$')
    CreateUserQuantity('vtheta','cos(theta)*cos(phi)*vx+cos(theta)*sin(phi)*vy-sin(theta)*vz',scaling_factor='v', label='$v_\\theta$')
    CreateUserQuantity('ar','sin(theta)*cos(phi)*ax+sin(theta)*sin(phi)*ay+cos(theta)*az',scaling_factor='a',label='$a_r$')
    CreateUserQuantity('aR','sin(theta)*cos(phi)*ax+sin(theta)*sin(phi)*ay',scaling_factor='a',label='$a_R$')
    # BUGFIX: aphi/atheta previously used the velocity components (vx,vy,vz),
    # apparently copy-pasted from vphi/vtheta; the acceleration components
    # are intended here, consistently with ar/aR above.
    CreateUserQuantity('aphi','cos(phi)*ay-sin(phi)*ax',scaling_factor='a',label='$a_\\phi$')
    CreateUserQuantity('atheta','cos(theta)*cos(phi)*ax+cos(theta)*sin(phi)*ay-sin(theta)*az',scaling_factor='a', label='$a_\\theta$')
    CreateUserQuantity('press','(gamma_eos - 1)*rho*u',scaling_factor='press',label='$P$')
    CreateUserQuantity('sound','sqrt(gamma_eos*(gamma_eos - 1)*u)',scaling_factor='v', label='$c_s$')
    CreateUserQuantity('temp','(gamma_eos - 1)*u*mu_bar',scaling_factor='temp',label='T')
    from data_fetcher import get_time_snapshot
    CreateTimeData('t',get_time_snapshot)
    from compute import COM
    CreateTimeData('com_x',COM)
    CreateTimeData('com_y',COM,quantity='y')
    CreateTimeData('com_z',COM,quantity='z')
    CreateTimeData('com_vx',COM,quantity='vx')
    CreateTimeData('com_vy',COM,quantity='vy')
    CreateTimeData('com_vz',COM,quantity='vz')
# Default code. Run when facade.py is imported
#------------------------------------------------------------------------------
init()
if defaults.parallel:
    # In parallel mode, make sure the plotting subprocess is terminated
    # cleanly on interrupt/termination signals and at interpreter exit.
    signal.signal(signal.SIGINT, sigint)
    signal.signal(signal.SIGTERM, sigint)
    signal.signal(signal.SIGSEGV, sigint)
    atexit.register(cleanup)
#------------------------------------------------------------------------------
# Smoke test / demo: runs only when this module is executed directly.
# Loads the 'TEST' simulation, plots density against an analytical solution,
# steps through snapshots and reports the L1 error norm.
if __name__=="__main__":
    loadsim('TEST')
    plot("x","rho")
    plotanalytical("x","rho")
    limit('x', -10.0, 10.0)
    snap(1)
    import time; time.sleep(2)
    next(); time.sleep(2)
    snap(8)
    limit('x', 'auto')
    print 'L1 error norm : ',L1errornorm("x","rho",1.0,8.0)
    block()
#
# loadsim('TEST')
# plot("x","y", snap=0)
# addplot("x", "y")
# window()
# plot("vx", "vy")
# plot("vx", "x")
# window()
# plot("x","rho")
# window()
# subfigure(2,2,1)
# plot("x", "y")
# subfigure(2,2,2)
# plot("vx", "vy")
# subfigure(2,2,3)
# plot("x", "rho")
# subfigure(2,2,4)
# plot("rho", "h")
# addplot("rho", "m")
# window(3)
# addplot("rho", "h")
# snap(99)
# for i in range(10):
# time.sleep(1)
# previous()
|
gandalfcode/gandalf
|
analysis/facade.py
|
Python
|
gpl-2.0
| 49,171
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 10:22:32 2015
Finished on Fri Jul 1 22:59:40 2016
@author: Daniel Danis <daniel.danis@savba.sk>
Use this script to create gene panel (GP) in BED format. Take infro from Ensembl's GTF file.
The purpose of GP is to define genome-scaled regions of interest.
As Whole Exome Sequencing produces plenty variants, GP is used to focus on defined
set of genes which may have been associated with the disease before.
The list of Ensembl Gene IDs or Associated gene names is used to select genes, 'protein_coding' transcripts or exons.
Upstream and downstream from UTR gene regions can be reported for each transcript.
Exon padding can be set in 5' and 3' direction.
"""
import argparse
import datetime
import logging
import libgpc
VERSION = 'v0.1.0'
def parse_arguments():
    """Build the command-line interface and return the parsed arguments."""
    cli = argparse.ArgumentParser(
        description="Use this script to create BED files containing regions of genes you are interested in. You need "
                    "to sort BED after creation with bedtools sort.")
    # Required positional inputs.
    cli.add_argument("gene_ids",
                     help="Path to ENSEMBL IDs or Associated gene names")
    cli.add_argument("index_file", help="Gtf cache index path")
    # Output locations (derived from the input name when omitted).
    cli.add_argument("-o", "--out_file", default=None, help="Where to save BED file. Default '<input_name>.bed'")
    cli.add_argument("-l", "--log_path", default=None, help="Where to save log. Default current directory")
    # Record format and region-size tuning (validated later by GPCOptions).
    cli.add_argument("--fmt", default='exon',
                     help="Output record format. Choose from exon, transcript or gene. Default: exon")
    cli.add_argument("-5", "--five_exon_padding", default=0, help="Set upstream exon padding. Default 0 bp.")
    cli.add_argument("-3", "--three_exon_padding", default=0, help="Set downstream exon padding. Default 0 bp.")
    cli.add_argument("-u", "--upstream", default=0, help="Set upstream region size. Default 0 bp. ")
    cli.add_argument("-d", "--downstream", default=0, help="Set downstream region size. Default 0 bp. ")
    cli.add_argument("-v", "--version", action="version", version="%(prog)s " + VERSION, help="Show script version")
    return cli.parse_args()
def validate_ensembl_id(ens):
    """Validate *ens* as an Ensembl Gene identifier (e.g. 'ENSG00000139618').

    Returns the identifier unchanged when it matches the canonical
    'ENSG' + 11 digits pattern; otherwise logs a warning and raises
    ValueError.
    """
    logger = logging.getLogger(__name__)
    # Canonical Ensembl gene IDs are 'ENSG' followed by 11 digits (15 chars
    # total).  The previous check only tested the prefix and length, so it
    # accepted non-numeric suffixes; this resolves the old 'validate with
    # some regex' TODO.
    if re.fullmatch(r"ENSG\d{11}", ens):
        logger.debug("ID '{}' is ok".format(ens))
        return ens
    else:
        logger.warning("Skipping record {}, it doesn't seem like an ENSEMBL Gene identifier!".format(ens))
        raise ValueError
def load_ensembl_ids(f_path):
    """Load Ensembl gene IDs from *f_path* and return the valid ones in a list.

    Lines starting with '#' are ignored.  Invalid identifiers are skipped
    (validate_ensembl_id logs a warning for each one).
    """
    logger = logging.getLogger(__name__)
    logger.info("Loading ENSEMBL gene IDs from file '{}'".format(f_path))
    ok_ids = list()
    with open(f_path, mode='r') as ef:
        for line in ef:
            stripped = line.strip()
            if stripped.startswith("#"):
                continue
            try:
                ok_ids.append(validate_ensembl_id(stripped))
            except ValueError:
                # Already warned about by validate_ensembl_id; just skip it.
                pass
    # Bug fix: the summary message previously said "associated gene names"
    # (copy-pasted from load_gene_names) although this function loads IDs.
    logger.info("Loaded {} ENSEMBL gene IDs".format(len(ok_ids)))
    return ok_ids
def load_gene_names(f_path):
    """Load associated gene names from *f_path* and return them in a list.

    Lines starting with '#' are treated as comments and skipped.
    """
    logger = logging.getLogger(__name__)
    logger.info("Loading associated gene names from file '{}'".format(f_path))
    with open(f_path, mode='r') as handle:
        stripped = (raw.strip() for raw in handle)
        gene_ids = [name for name in stripped if not name.startswith("#")]
    logger.info("Loaded {} associated gene names".format(len(gene_ids)))
    return gene_ids
def sniff_id_types(f_path, no_lines_read=20):
    """Read a few lines from *f_path* and guess whether it holds Ensembl IDs.

    Returns 'ENS_GENE' when every sampled non-comment line starts with
    'ENSG' (this includes an empty file, since all([]) is True), otherwise
    'ASSOC_GENE_NAME'.
    """
    logger = logging.getLogger(__name__)
    logger.info("Reading first {} lines of file '{}' and guessing ID type.".format(no_lines_read, f_path))
    lines = []
    with open(f_path, mode='r') as f_handle:
        for line in f_handle:
            if line.startswith("#"):
                continue
            # Bug fix: the original called line.strip() without using the
            # result, so sampled lines kept their trailing newline.
            lines.append(line.strip())
            if len(lines) >= no_lines_read:
                break
    if all(tag.startswith("ENSG") for tag in lines):
        logger.info("ID type set to '{}'".format('ENS_GENE'))
        return 'ENS_GENE'
    else:
        logger.info("ID type set to '{}'".format('ASSOC_GENE_NAME'))
        return 'ASSOC_GENE_NAME'
class GPCOptions(object):
    """Dict-like container for run options with per-option validation.

    Unknown or malformed values fall back to the class-level defaults;
    options without validation support raise NotImplementedError.
    """
    # Default values applied when an option is not supplied (or invalid).
    defaults = {'fmt': 'exon', 'five_padding': 0,
                'three_padding': 0, 'upstream': 0,
                'downstream': 0, 't_types': ['protein_coding'],
                't_sources': ['havana', 'ensembl_havana', 'insdc', 'ensembl'],
                'logger': logging.getLogger(__name__)}
    # Accepted values for the 'fmt' option.
    fmts = ('gene', 'transcript', 'exon')
    def __init__(self, *args, **kwargs):
        self.logger = logging.getLogger(__name__)
        self._cont = {}
        for name, fallback in GPCOptions.defaults.items():
            if name in kwargs:
                # Caller-supplied value -- validate before storing.
                self._cont[name] = self._check_value(name, kwargs[name])
            else:
                self._cont[name] = fallback
        if args:
            self.logger.warning("Unable to process values '{}'".format(','.join([str(x) for x in args])))
    def _check_value(self, value_type, value):
        """Validate *value* for option *value_type*; fall back to default on bad input."""
        if value_type in ('five_padding', 'three_padding', 'upstream', 'downstream'):
            try:
                return int(value)
            except ValueError:
                pass  # fall through to the default below
        elif value_type == 'fmt':
            if value in GPCOptions.fmts:
                return value
            # invalid fmt -- fall through to the default below
        else:
            # 't_types', 't_sources' and anything else are not yet settable.
            raise NotImplementedError
        self.logger.warning("Unable to set option {} to '{}'. Using default {}"
                            .format(value_type, value, GPCOptions.defaults[value_type]))
        return GPCOptions.defaults[value_type]
    def __setitem__(self, key, value):
        self._cont[key] = value
    def __getitem__(self, item):
        return self._cont[item]
    def __delitem__(self, key):
        del self._cont[key]
class GenePanelCreator(object):
    """Runner class of Gene Panel Creator (GPC).

    Create an instance, prepare gene objects from an ID file, then write
    them out as BED records.
    """
    def __init__(self, idx_file, log_file='gene_panel_creator.log', **kwargs):
        """Set up logging, remember the GTF index path and validate run options."""
        libgpc.setup_logging(log_file)
        self.logger = logging.getLogger('gene_panel_creator')
        self.idx_file = idx_file
        self.out_fmt = GPCOptions(**kwargs)
    def prepare_genes(self, file_with_ids):
        """Detect id types of genes, find & return gene objects from gtf file
        :param file_with_ids: path to file with gene IDs
        :type file_with_ids: str
        :return: gtf_to_genes.t_gene objects
        :rtype: list
        """
        manager = libgpc.GTFManager(self.idx_file)
        id_kind = sniff_id_types(file_with_ids)
        # Dispatch on the detected identifier type (guard-clause style).
        if id_kind == 'ENS_GENE':
            return manager.get_genes_by_ens_ids(load_ensembl_ids(file_with_ids))
        if id_kind == 'ASSOC_GENE_NAME':
            return manager.get_genes_by_assoc_names(load_gene_names(file_with_ids))
        raise NotImplementedError
    def write_out_results(self, genes, out_path):
        """Write gene objects in bed format. Apply padding, extend upstream & downstream regions etc.."""
        now = datetime.datetime.now()
        # Timestamp built from the struct_time fields (year..second) joined
        # with dashes, matching the original header format.
        stamp = '-'.join(str(field) for field in now.timetuple()[:-3])
        with open(out_path, 'w') as bed:
            bed.write("#Generated by gene_panel_creator {} at {}\n".format(VERSION, stamp))
            self.logger.info("Writing {} genes to file '{}'".format(len(genes), out_path))
            self.logger.info("Writing records in '{}' format".format(self.out_fmt['fmt']))
            for gene in genes:
                formatter = libgpc.Gene2BedFormatter(gene)
                # Apply each non-zero region adjustment in the same order as before.
                for option, apply_region in (('upstream', formatter.add_upstream),
                                             ('downstream', formatter.add_downstream),
                                             ('five_padding', formatter.add_5_exon_padding),
                                             ('three_padding', formatter.add_3_exon_padding)):
                    amount = self.out_fmt[option]
                    if amount:
                        apply_region(amount)
                for record in formatter.to_be_written(self.out_fmt['fmt']):
                    bed.write(record + '\n')
if __name__ == '__main__':
    args = parse_arguments()
    # Derive output/log paths from the input file name unless given explicitly.
    out_path = args.out_file if args.out_file else libgpc.get_nice_path(args.gene_ids)
    log_path = args.log_path if args.log_path else libgpc.get_nice_path(args.gene_ids, suffix='log')
    runner = GenePanelCreator(args.index_file, log_file=log_path,
                              fmt=args.fmt, five_padding=args.five_exon_padding,
                              three_padding=args.three_exon_padding,
                              upstream=args.upstream, downstream=args.downstream,
                              )
    genes = runner.prepare_genes(args.gene_ids)
    runner.write_out_results(genes, out_path)
|
humno/gene-panel-creator
|
gene_panel_creator.py
|
Python
|
gpl-2.0
| 10,269
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
    """Error handler that downgrades wrapper-generation failures to warnings."""
    def handle_error(self, wrapper, exception, traceback_):
        # Report the failing wrapper but keep generating the rest of the module.
        message = "exception %r in wrapper %s" % (exception, wrapper)
        warnings.warn(message)
        return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
    """Build and return the root pybindgen Module for the ns.stats bindings."""
    return Module('ns.stats', cpp_namespace='::ns3')
def register_types(module):
root_module = module.get_root()
## log.h (module 'core'): ns3::LogLevel [enumeration]
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE', 'LOG_PREFIX_LEVEL', 'LOG_PREFIX_ALL'], import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator', 'ns3::AttributeConstructionList::CIterator')
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator*', 'ns3::AttributeConstructionList::CIterator*')
typehandlers.add_type_alias('std::list< ns3::AttributeConstructionList::Item > const_iterator&', 'ns3::AttributeConstructionList::CIterator&')
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## data-output-interface.h (module 'stats'): ns3::DataOutputCallback [class]
module.add_class('DataOutputCallback', allow_subclassing=True)
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeChecker'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::AttributeValue'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::CallbackImplBase> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::EventImpl> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::EventImpl'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::Hash::Implementation> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::SQLiteOutput> [struct]
module.add_class('DefaultDeleter', template_parameters=['ns3::SQLiteOutput'])
## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::TraceSourceAccessor> [struct]
module.add_class('DefaultDeleter', import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor'])
## event-id.h (module 'core'): ns3::EventId [class]
module.add_class('EventId', import_from_module='ns.core')
## event-id.h (module 'core'): ns3::EventId::UID [enumeration]
module.add_enum('UID', ['INVALID', 'NOW', 'DESTROY', 'RESERVED', 'VALID'], outer_class=root_module['ns3::EventId'], import_from_module='ns.core')
## file-helper.h (module 'stats'): ns3::FileHelper [class]
module.add_class('FileHelper')
## gnuplot.h (module 'stats'): ns3::Gnuplot [class]
module.add_class('Gnuplot')
## gnuplot.h (module 'stats'): ns3::GnuplotCollection [class]
module.add_class('GnuplotCollection')
## gnuplot.h (module 'stats'): ns3::GnuplotDataset [class]
module.add_class('GnuplotDataset')
## gnuplot-helper.h (module 'stats'): ns3::GnuplotHelper [class]
module.add_class('GnuplotHelper')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## histogram.h (module 'stats'): ns3::Histogram [class]
module.add_class('Histogram')
## log.h (module 'core'): ns3::LogComponent [class]
module.add_class('LogComponent', import_from_module='ns.core')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >', 'ns3::LogComponent::ComponentList')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >*', 'ns3::LogComponent::ComponentList*')
typehandlers.add_type_alias('std::map< std::string, ns3::LogComponent * >&', 'ns3::LogComponent::ComponentList&')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## log.h (module 'core'): ns3::ParameterLogger [class]
module.add_class('ParameterLogger', import_from_module='ns.core')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::ObjectBase'], template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'])
## simulator.h (module 'core'): ns3::Simulator [class]
module.add_class('Simulator', destructor_visibility='private', import_from_module='ns.core')
## simulator.h (module 'core'): ns3::Simulator [enumeration]
module.add_enum('', ['NO_CONTEXT'], outer_class=root_module['ns3::Simulator'], import_from_module='ns.core')
## data-calculator.h (module 'stats'): ns3::StatisticalSummary [class]
module.add_class('StatisticalSummary', allow_subclassing=True)
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST', 'AUTO'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )', 'ns3::Time::TracedCallback')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )*', 'ns3::Time::TracedCallback*')
typehandlers.add_type_alias('void ( * ) ( ns3::Time )&', 'ns3::Time::TracedCallback&')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## traced-value.h (module 'core'): ns3::TracedValue<bool> [class]
module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['bool'])
## traced-value.h (module 'core'): ns3::TracedValue<double> [class]
module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['double'])
## traced-value.h (module 'core'): ns3::TracedValue<unsigned char> [class]
module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned char'])
## traced-value.h (module 'core'): ns3::TracedValue<unsigned int> [class]
module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned int'])
## traced-value.h (module 'core'): ns3::TracedValue<unsigned short> [class]
module.add_class('TracedValue', import_from_module='ns.core', template_parameters=['unsigned short'])
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::SupportLevel [enumeration]
module.add_enum('SupportLevel', ['SUPPORTED', 'DEPRECATED', 'OBSOLETE'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
typehandlers.add_type_alias('uint32_t', 'ns3::TypeId::hash_t')
typehandlers.add_type_alias('uint32_t*', 'ns3::TypeId::hash_t*')
typehandlers.add_type_alias('uint32_t&', 'ns3::TypeId::hash_t&')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-128.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset [class]
module.add_class('Gnuplot2dDataset', parent=root_module['ns3::GnuplotDataset'])
## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset::Style [enumeration]
module.add_enum('Style', ['LINES', 'POINTS', 'LINES_POINTS', 'DOTS', 'IMPULSES', 'STEPS', 'FSTEPS', 'HISTEPS'], outer_class=root_module['ns3::Gnuplot2dDataset'])
## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset::ErrorBars [enumeration]
module.add_enum('ErrorBars', ['NONE', 'X', 'Y', 'XY'], outer_class=root_module['ns3::Gnuplot2dDataset'])
## gnuplot.h (module 'stats'): ns3::Gnuplot2dFunction [class]
module.add_class('Gnuplot2dFunction', parent=root_module['ns3::GnuplotDataset'])
## gnuplot.h (module 'stats'): ns3::Gnuplot3dDataset [class]
module.add_class('Gnuplot3dDataset', parent=root_module['ns3::GnuplotDataset'])
## gnuplot.h (module 'stats'): ns3::Gnuplot3dFunction [class]
module.add_class('Gnuplot3dFunction', parent=root_module['ns3::GnuplotDataset'])
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::EventImpl', 'ns3::empty', 'ns3::DefaultDeleter<ns3::EventImpl>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SQLiteOutput, ns3::empty, ns3::DefaultDeleter<ns3::SQLiteOutput> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::SQLiteOutput', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SQLiteOutput>'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'), parent=root_module['ns3::empty'], template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## boolean.h (module 'core'): ns3::BooleanChecker [class]
module.add_class('BooleanChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## boolean.h (module 'core'): ns3::BooleanValue [class]
module.add_class('BooleanValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## data-calculator.h (module 'stats'): ns3::DataCalculator [class]
module.add_class('DataCalculator', parent=root_module['ns3::Object'])
## data-collection-object.h (module 'stats'): ns3::DataCollectionObject [class]
module.add_class('DataCollectionObject', parent=root_module['ns3::Object'])
## data-collector.h (module 'stats'): ns3::DataCollector [class]
module.add_class('DataCollector', parent=root_module['ns3::Object'])
## data-output-interface.h (module 'stats'): ns3::DataOutputInterface [class]
module.add_class('DataOutputInterface', parent=root_module['ns3::Object'])
## double.h (module 'core'): ns3::DoubleValue [class]
module.add_class('DoubleValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeAccessor [class]
module.add_class('EmptyAttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::AttributeAccessor'])
## attribute.h (module 'core'): ns3::EmptyAttributeChecker [class]
module.add_class('EmptyAttributeChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## enum.h (module 'core'): ns3::EnumChecker [class]
module.add_class('EnumChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## enum.h (module 'core'): ns3::EnumValue [class]
module.add_class('EnumValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## event-impl.h (module 'core'): ns3::EventImpl [class]
module.add_class('EventImpl', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
## file-aggregator.h (module 'stats'): ns3::FileAggregator [class]
module.add_class('FileAggregator', parent=root_module['ns3::DataCollectionObject'])
## file-aggregator.h (module 'stats'): ns3::FileAggregator::FileType [enumeration]
module.add_enum('FileType', ['FORMATTED', 'SPACE_SEPARATED', 'COMMA_SEPARATED', 'TAB_SEPARATED'], outer_class=root_module['ns3::FileAggregator'])
## gnuplot-aggregator.h (module 'stats'): ns3::GnuplotAggregator [class]
module.add_class('GnuplotAggregator', parent=root_module['ns3::DataCollectionObject'])
## gnuplot-aggregator.h (module 'stats'): ns3::GnuplotAggregator::KeyLocation [enumeration]
module.add_enum('KeyLocation', ['NO_KEY', 'KEY_INSIDE', 'KEY_ABOVE', 'KEY_BELOW'], outer_class=root_module['ns3::GnuplotAggregator'])
## integer.h (module 'core'): ns3::IntegerValue [class]
module.add_class('IntegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## basic-data-calculators.h (module 'stats'): ns3::MinMaxAvgTotalCalculator<double> [class]
module.add_class('MinMaxAvgTotalCalculator', parent=[root_module['ns3::DataCalculator'], root_module['ns3::StatisticalSummary']], template_parameters=['double'])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## omnet-data-output.h (module 'stats'): ns3::OmnetDataOutput [class]
module.add_class('OmnetDataOutput', parent=root_module['ns3::DataOutputInterface'])
## probe.h (module 'stats'): ns3::Probe [class]
module.add_class('Probe', parent=root_module['ns3::DataCollectionObject'])
## sqlite-output.h (module 'stats'): ns3::SQLiteOutput [class]
module.add_class('SQLiteOutput', parent=root_module['ns3::SimpleRefCount< ns3::SQLiteOutput, ns3::empty, ns3::DefaultDeleter<ns3::SQLiteOutput> >'])
## sqlite-data-output.h (module 'stats'): ns3::SqliteDataOutput [class]
module.add_class('SqliteDataOutput', parent=root_module['ns3::DataOutputInterface'])
## time-data-calculators.h (module 'stats'): ns3::TimeMinMaxAvgTotalCalculator [class]
module.add_class('TimeMinMaxAvgTotalCalculator', parent=root_module['ns3::DataCalculator'])
## time-probe.h (module 'stats'): ns3::TimeProbe [class]
module.add_class('TimeProbe', parent=root_module['ns3::Probe'])
## time-series-adaptor.h (module 'stats'): ns3::TimeSeriesAdaptor [class]
module.add_class('TimeSeriesAdaptor', parent=root_module['ns3::DataCollectionObject'])
typehandlers.add_type_alias('void ( * ) ( double const, double const )', 'ns3::TimeSeriesAdaptor::OutputTracedCallback')
typehandlers.add_type_alias('void ( * ) ( double const, double const )*', 'ns3::TimeSeriesAdaptor::OutputTracedCallback*')
typehandlers.add_type_alias('void ( * ) ( double const, double const )&', 'ns3::TimeSeriesAdaptor::OutputTracedCallback&')
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## uinteger-16-probe.h (module 'stats'): ns3::Uinteger16Probe [class]
module.add_class('Uinteger16Probe', parent=root_module['ns3::Probe'])
## uinteger-32-probe.h (module 'stats'): ns3::Uinteger32Probe [class]
module.add_class('Uinteger32Probe', parent=root_module['ns3::Probe'])
## uinteger-8-probe.h (module 'stats'): ns3::Uinteger8Probe [class]
module.add_class('Uinteger8Probe', parent=root_module['ns3::Probe'])
## uinteger.h (module 'core'): ns3::UintegerValue [class]
module.add_class('UintegerValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## boolean-probe.h (module 'stats'): ns3::BooleanProbe [class]
module.add_class('BooleanProbe', parent=root_module['ns3::Probe'])
## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['ns3::ObjectBase *', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'bool', 'bool', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'double', 'double', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'unsigned char', 'unsigned char', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'unsigned int', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> [class]
module.add_class('CallbackImpl', import_from_module='ns.core', parent=root_module['ns3::CallbackImplBase'], template_parameters=['void', 'unsigned short', 'unsigned short', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'])
## double-probe.h (module 'stats'): ns3::DoubleProbe [class]
module.add_class('DoubleProbe', parent=root_module['ns3::Probe'])
module.add_container('std::map< std::string, ns3::LogComponent * >', ('std::string', 'ns3::LogComponent *'), container_type='map')
typehandlers.add_type_alias('std::list< ns3::Ptr< ns3::DataCalculator > >', 'ns3::DataCalculatorList')
typehandlers.add_type_alias('std::list< ns3::Ptr< ns3::DataCalculator > >*', 'ns3::DataCalculatorList*')
typehandlers.add_type_alias('std::list< ns3::Ptr< ns3::DataCalculator > >&', 'ns3::DataCalculatorList&')
typehandlers.add_type_alias('std::list< std::pair< std::string, std::string > >', 'ns3::MetadataList')
typehandlers.add_type_alias('std::list< std::pair< std::string, std::string > >*', 'ns3::MetadataList*')
typehandlers.add_type_alias('std::list< std::pair< std::string, std::string > >&', 'ns3::MetadataList&')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::TimePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::TimePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::TimePrinter&')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )', 'ns3::NodePrinter')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )*', 'ns3::NodePrinter*')
typehandlers.add_type_alias('void ( * ) ( std::ostream & )&', 'ns3::NodePrinter&')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
def register_types_ns3_FatalImpl(module):
    """Register Python binding types for the ns3::FatalImpl namespace.

    No public types are bound here; only the root module handle is
    resolved, following the generated registration pattern.
    """
    root_module = module.get_root()
def register_types_ns3_Hash(module):
    """Register Python binding types for the ns3::Hash namespace.

    Binds the ns3::Hash::Implementation base class, the 32/64-bit
    hash-function pointer aliases (value, pointer and reference forms),
    and the nested ns3::Hash::Function namespace.
    """
    root_module = module.get_root()
    ## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    # Hash32Function_ptr / Hash64Function_ptr aliases, each registered in
    # value, pointer and reference form (in that order).
    for width in ('32', '64'):
        signature = 'uint%s_t ( * ) ( char const *, std::size_t const )' % width
        alias = 'ns3::Hash::Hash%sFunction_ptr' % width
        for suffix in ('', '*', '&'):
            typehandlers.add_type_alias(signature + suffix, alias + suffix)
    ## Register a nested module for the namespace Function
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
    """Register the concrete hash implementations under ns3::Hash::Function.

    Each class (Fnv1a, Hash32, Hash64, Murmur3) is imported from the core
    module and derives from ns3::Hash::Implementation.
    """
    root_module = module.get_root()
    implementation_base = root_module['ns3::Hash::Implementation']
    # Registration order matches the generated bindings.
    for class_name in ('Fnv1a', 'Hash32', 'Hash64', 'Murmur3'):
        module.add_class(class_name, import_from_module='ns.core', parent=implementation_base)
def register_types_ns3_TracedValueCallback(module):
    """Register the ns3::TracedValueCallback type aliases.

    Each callback signature is aliased under ns3::TracedValueCallback in
    value, pointer and reference form, in the same order as the generated
    bindings (Time, Bool, Int8..Uint64, Double, Void).
    """
    root_module = module.get_root()
    # (C function-pointer signature, alias short name) pairs.
    traced_callbacks = (
        ('void ( * ) ( ns3::Time, ns3::Time )', 'Time'),
        ('void ( * ) ( bool, bool )', 'Bool'),
        ('void ( * ) ( int8_t, int8_t )', 'Int8'),
        ('void ( * ) ( uint8_t, uint8_t )', 'Uint8'),
        ('void ( * ) ( int16_t, int16_t )', 'Int16'),
        ('void ( * ) ( uint16_t, uint16_t )', 'Uint16'),
        ('void ( * ) ( int32_t, int32_t )', 'Int32'),
        ('void ( * ) ( uint32_t, uint32_t )', 'Uint32'),
        ('void ( * ) ( int64_t, int64_t )', 'Int64'),
        ('void ( * ) ( uint64_t, uint64_t )', 'Uint64'),
        ('void ( * ) ( double, double )', 'Double'),
        ('void ( * ) ( )', 'Void'),
    )
    for signature, short_name in traced_callbacks:
        alias = 'ns3::TracedValueCallback::' + short_name
        for suffix in ('', '*', '&'):
            typehandlers.add_type_alias(signature + suffix, alias + suffix)
def register_types_ns3_internal(module):
    """Register Python binding types for the ns3::internal namespace.

    No public types are bound here; only the root module handle is
    resolved, following the generated registration pattern.
    """
    root_module = module.get_root()
def register_methods(root_module):
    """Dispatch per-class method registration for every bound type.

    Each register_Ns3*_methods helper binds one C++ class's constructors,
    methods and attributes onto the pybindgen wrapper previously created
    by the type-registration pass.  The wrappers are looked up by their
    fully-qualified C++ name in root_module.

    NOTE(review): the call order below is generator-emitted; it appears to
    follow the type-registration order, so do not reorder by hand.
    """
    register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
    register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
    register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
    register_Ns3DataOutputCallback_methods(root_module, root_module['ns3::DataOutputCallback'])
    # DefaultDeleter<T> template instantiations.
    register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeAccessor >'])
    register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeChecker >'])
    register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, root_module['ns3::DefaultDeleter< ns3::AttributeValue >'])
    register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, root_module['ns3::DefaultDeleter< ns3::CallbackImplBase >'])
    register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, root_module['ns3::DefaultDeleter< ns3::EventImpl >'])
    register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, root_module['ns3::DefaultDeleter< ns3::Hash::Implementation >'])
    register_Ns3DefaultDeleter__Ns3SQLiteOutput_methods(root_module, root_module['ns3::DefaultDeleter< ns3::SQLiteOutput >'])
    register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::DefaultDeleter< ns3::TraceSourceAccessor >'])
    register_Ns3EventId_methods(root_module, root_module['ns3::EventId'])
    register_Ns3FileHelper_methods(root_module, root_module['ns3::FileHelper'])
    register_Ns3Gnuplot_methods(root_module, root_module['ns3::Gnuplot'])
    register_Ns3GnuplotCollection_methods(root_module, root_module['ns3::GnuplotCollection'])
    register_Ns3GnuplotDataset_methods(root_module, root_module['ns3::GnuplotDataset'])
    register_Ns3GnuplotHelper_methods(root_module, root_module['ns3::GnuplotHelper'])
    register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
    register_Ns3Histogram_methods(root_module, root_module['ns3::Histogram'])
    register_Ns3LogComponent_methods(root_module, root_module['ns3::LogComponent'])
    register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
    register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
    register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
    register_Ns3ParameterLogger_methods(root_module, root_module['ns3::ParameterLogger'])
    register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
    register_Ns3Simulator_methods(root_module, root_module['ns3::Simulator'])
    register_Ns3StatisticalSummary_methods(root_module, root_module['ns3::StatisticalSummary'])
    register_Ns3Time_methods(root_module, root_module['ns3::Time'])
    register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
    # TracedValue<T> template instantiations.
    register_Ns3TracedValue__Bool_methods(root_module, root_module['ns3::TracedValue< bool >'])
    register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
    register_Ns3TracedValue__Unsigned_char_methods(root_module, root_module['ns3::TracedValue< unsigned char >'])
    register_Ns3TracedValue__Unsigned_int_methods(root_module, root_module['ns3::TracedValue< unsigned int >'])
    register_Ns3TracedValue__Unsigned_short_methods(root_module, root_module['ns3::TracedValue< unsigned short >'])
    register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
    register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
    register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
    register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
    register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
    register_Ns3Gnuplot2dDataset_methods(root_module, root_module['ns3::Gnuplot2dDataset'])
    register_Ns3Gnuplot2dFunction_methods(root_module, root_module['ns3::Gnuplot2dFunction'])
    register_Ns3Gnuplot3dDataset_methods(root_module, root_module['ns3::Gnuplot3dDataset'])
    register_Ns3Gnuplot3dFunction_methods(root_module, root_module['ns3::Gnuplot3dFunction'])
    register_Ns3Object_methods(root_module, root_module['ns3::Object'])
    register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
    # SimpleRefCount<T, PARENT, DELETER> template instantiations.
    register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
    register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
    register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
    register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
    register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
    register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    register_Ns3SimpleRefCount__Ns3SQLiteOutput_Ns3Empty_Ns3DefaultDeleter__lt__ns3SQLiteOutput__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SQLiteOutput, ns3::empty, ns3::DefaultDeleter<ns3::SQLiteOutput> >'])
    register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
    register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
    register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
    register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
    register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
    register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
    register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
    register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
    register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
    register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
    register_Ns3DataCalculator_methods(root_module, root_module['ns3::DataCalculator'])
    register_Ns3DataCollectionObject_methods(root_module, root_module['ns3::DataCollectionObject'])
    register_Ns3DataCollector_methods(root_module, root_module['ns3::DataCollector'])
    register_Ns3DataOutputInterface_methods(root_module, root_module['ns3::DataOutputInterface'])
    register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
    register_Ns3EmptyAttributeAccessor_methods(root_module, root_module['ns3::EmptyAttributeAccessor'])
    register_Ns3EmptyAttributeChecker_methods(root_module, root_module['ns3::EmptyAttributeChecker'])
    register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
    register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
    register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
    register_Ns3EventImpl_methods(root_module, root_module['ns3::EventImpl'])
    register_Ns3FileAggregator_methods(root_module, root_module['ns3::FileAggregator'])
    register_Ns3GnuplotAggregator_methods(root_module, root_module['ns3::GnuplotAggregator'])
    register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
    register_Ns3MinMaxAvgTotalCalculator__Double_methods(root_module, root_module['ns3::MinMaxAvgTotalCalculator< double >'])
    register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
    register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
    register_Ns3OmnetDataOutput_methods(root_module, root_module['ns3::OmnetDataOutput'])
    register_Ns3Probe_methods(root_module, root_module['ns3::Probe'])
    register_Ns3SQLiteOutput_methods(root_module, root_module['ns3::SQLiteOutput'])
    register_Ns3SqliteDataOutput_methods(root_module, root_module['ns3::SqliteDataOutput'])
    register_Ns3TimeMinMaxAvgTotalCalculator_methods(root_module, root_module['ns3::TimeMinMaxAvgTotalCalculator'])
    register_Ns3TimeProbe_methods(root_module, root_module['ns3::TimeProbe'])
    register_Ns3TimeSeriesAdaptor_methods(root_module, root_module['ns3::TimeSeriesAdaptor'])
    register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
    register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
    register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
    register_Ns3Uinteger16Probe_methods(root_module, root_module['ns3::Uinteger16Probe'])
    register_Ns3Uinteger32Probe_methods(root_module, root_module['ns3::Uinteger32Probe'])
    register_Ns3Uinteger8Probe_methods(root_module, root_module['ns3::Uinteger8Probe'])
    register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
    register_Ns3BooleanProbe_methods(root_module, root_module['ns3::BooleanProbe'])
    # CallbackImpl<...> template instantiations.
    register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Bool_Bool_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Double_Double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Unsigned_char_Unsigned_char_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Unsigned_int_Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3CallbackImpl__Void_Unsigned_short_Unsigned_short_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, root_module['ns3::CallbackImpl< void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >'])
    register_Ns3DoubleProbe_methods(root_module, root_module['ns3::DoubleProbe'])
    register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
    register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
    register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
    register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
    register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
    return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList (attribute-construction-list.h, module 'core').

    Registers the copy/default constructors and the Add/Begin/End/Find
    members onto the pybindgen class wrapper `cls`.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
    cls.add_method('Add',
                   'void',
                   [param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::Begin() const [member function]
    cls.add_method('Begin',
                   'ns3::AttributeConstructionList::CIterator',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::CIterator ns3::AttributeConstructionList::End() const [member function]
    cls.add_method('End',
                   'ns3::AttributeConstructionList::CIterator',
                   [],
                   is_const=True)
    ## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('Find',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True)
    return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
    """Bind ns3::AttributeConstructionList::Item (attribute-construction-list.h, module 'core').

    Registers both constructors and the checker/name/value instance
    attributes onto the pybindgen class wrapper `cls`.
    """
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
    cls.add_constructor([])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
    cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
    return
def register_Ns3CallbackBase_methods(root_module, cls):
    """Bind ns3::CallbackBase (callback.h, module 'core').

    Registers the public copy/default constructors, the const GetImpl()
    accessor, and the protected constructor taking a CallbackImplBase.
    """
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
    cls.add_method('GetImpl',
                   'ns3::Ptr< ns3::CallbackImplBase >',
                   [],
                   is_const=True)
    ## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
                        visibility='protected')
    return
def register_Ns3DataOutputCallback_methods(root_module, cls):
    """Bind ns3::DataOutputCallback (data-output-interface.h, module 'stats').

    Registers the constructors, the five pure-virtual OutputSingleton
    overloads (int, uint32_t, double, std::string, ns3::Time values) and
    the pure-virtual OutputStatistic member.  The overloads are registered
    in declaration order, which pybindgen uses for overload resolution.
    """
    ## data-output-interface.h (module 'stats'): ns3::DataOutputCallback::DataOutputCallback() [constructor]
    cls.add_constructor([])
    ## data-output-interface.h (module 'stats'): ns3::DataOutputCallback::DataOutputCallback(ns3::DataOutputCallback const & arg0) [constructor]
    cls.add_constructor([param('ns3::DataOutputCallback const &', 'arg0')])
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, int val) [member function]
    cls.add_method('OutputSingleton',
                   'void',
                   [param('std::string', 'key'), param('std::string', 'variable'), param('int', 'val')],
                   is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, uint32_t val) [member function]
    cls.add_method('OutputSingleton',
                   'void',
                   [param('std::string', 'key'), param('std::string', 'variable'), param('uint32_t', 'val')],
                   is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, double val) [member function]
    cls.add_method('OutputSingleton',
                   'void',
                   [param('std::string', 'key'), param('std::string', 'variable'), param('double', 'val')],
                   is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, std::string val) [member function]
    cls.add_method('OutputSingleton',
                   'void',
                   [param('std::string', 'key'), param('std::string', 'variable'), param('std::string', 'val')],
                   is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputSingleton(std::string key, std::string variable, ns3::Time val) [member function]
    cls.add_method('OutputSingleton',
                   'void',
                   [param('std::string', 'key'), param('std::string', 'variable'), param('ns3::Time', 'val')],
                   is_pure_virtual=True, is_virtual=True)
    ## data-output-interface.h (module 'stats'): void ns3::DataOutputCallback::OutputStatistic(std::string key, std::string variable, ns3::StatisticalSummary const * statSum) [member function]
    cls.add_method('OutputStatistic',
                   'void',
                   [param('std::string', 'key'), param('std::string', 'variable'), param('ns3::StatisticalSummary const *', 'statSum')],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3DefaultDeleter__Ns3AttributeAccessor_methods(root_module, cls):
    """Bind ns3::DefaultDeleter<ns3::AttributeAccessor> (default-deleter.h, module 'core').

    Registers the copy constructor, the default constructor and the
    static Delete() helper onto the pybindgen class wrapper `cls`.
    """
    # ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    # ns3::DefaultDeleter<ns3::AttributeAccessor>::DefaultDeleter(DefaultDeleter const &) [copy constructor]
    copy_arg = param('ns3::DefaultDeleter< ns3::AttributeAccessor > const &', 'arg0')
    cls.add_constructor([copy_arg])
    # static void Delete(ns3::AttributeAccessor * object) [member function]
    cls.add_method(
        'Delete', 'void',
        [param('ns3::AttributeAccessor *', 'object')],
        is_static=True)
    return
def register_Ns3DefaultDeleter__Ns3AttributeChecker_methods(root_module, cls):
    """Bind ns3::DefaultDeleter<ns3::AttributeChecker> (default-deleter.h, module 'core'):
    default/copy constructors plus the static Delete() helper.
    """
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeChecker>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeChecker> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeChecker > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeChecker>::Delete(ns3::AttributeChecker * object) [member function]
    cls.add_method('Delete',
                   'void',
                   [param('ns3::AttributeChecker *', 'object')],
                   is_static=True)
    return
def register_Ns3DefaultDeleter__Ns3AttributeValue_methods(root_module, cls):
    """Bind ns3::DefaultDeleter<ns3::AttributeValue> (default-deleter.h, module 'core'):
    default/copy constructors plus the static Delete() helper.
    """
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter() [constructor]
    cls.add_constructor([])
    ## default-deleter.h (module 'core'): ns3::DefaultDeleter<ns3::AttributeValue>::DefaultDeleter(ns3::DefaultDeleter<ns3::AttributeValue> const & arg0) [constructor]
    cls.add_constructor([param('ns3::DefaultDeleter< ns3::AttributeValue > const &', 'arg0')])
    ## default-deleter.h (module 'core'): static void ns3::DefaultDeleter<ns3::AttributeValue>::Delete(ns3::AttributeValue * object) [member function]
    cls.add_method('Delete',
                   'void',
                   [param('ns3::AttributeValue *', 'object')],
                   is_static=True)
    return
def register_Ns3DefaultDeleter__Ns3CallbackImplBase_methods(root_module, cls):
    """Bind ns3::DefaultDeleter<ns3::CallbackImplBase> (default-deleter.h,
    module 'core'): default/copy constructors and the static Delete() helper."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor(
        [param('ns3::DefaultDeleter< ns3::CallbackImplBase > const &', 'arg0')])
    # static void Delete(ns3::CallbackImplBase *object)
    cls.add_method(
        'Delete', 'void',
        [param('ns3::CallbackImplBase *', 'object')],
        is_static=True)
def register_Ns3DefaultDeleter__Ns3EventImpl_methods(root_module, cls):
    """Bind ns3::DefaultDeleter<ns3::EventImpl> (default-deleter.h,
    module 'core'): default/copy constructors and the static Delete() helper."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor(
        [param('ns3::DefaultDeleter< ns3::EventImpl > const &', 'arg0')])
    # static void Delete(ns3::EventImpl *object)
    cls.add_method(
        'Delete', 'void',
        [param('ns3::EventImpl *', 'object')],
        is_static=True)
def register_Ns3DefaultDeleter__Ns3HashImplementation_methods(root_module, cls):
    """Bind ns3::DefaultDeleter<ns3::Hash::Implementation> (default-deleter.h,
    module 'core'): default/copy constructors and the static Delete() helper."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor(
        [param('ns3::DefaultDeleter< ns3::Hash::Implementation > const &', 'arg0')])
    # static void Delete(ns3::Hash::Implementation *object)
    cls.add_method(
        'Delete', 'void',
        [param('ns3::Hash::Implementation *', 'object')],
        is_static=True)
def register_Ns3DefaultDeleter__Ns3SQLiteOutput_methods(root_module, cls):
    """Bind ns3::DefaultDeleter<ns3::SQLiteOutput> (default-deleter.h,
    module 'core'): default/copy constructors and the static Delete() helper."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor(
        [param('ns3::DefaultDeleter< ns3::SQLiteOutput > const &', 'arg0')])
    # static void Delete(ns3::SQLiteOutput *object)
    cls.add_method(
        'Delete', 'void',
        [param('ns3::SQLiteOutput *', 'object')],
        is_static=True)
def register_Ns3DefaultDeleter__Ns3TraceSourceAccessor_methods(root_module, cls):
    """Bind ns3::DefaultDeleter<ns3::TraceSourceAccessor> (default-deleter.h,
    module 'core'): default/copy constructors and the static Delete() helper."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor(
        [param('ns3::DefaultDeleter< ns3::TraceSourceAccessor > const &', 'arg0')])
    # static void Delete(ns3::TraceSourceAccessor *object)
    cls.add_method(
        'Delete', 'void',
        [param('ns3::TraceSourceAccessor *', 'object')],
        is_static=True)
def register_Ns3EventId_methods(root_module, cls):
    """Register bindings for ns3::EventId (event-id.h, module 'core') on the
    wrapped class `cls`: comparison operators, constructors, and accessors.
    NOTE: generated code — registration order is preserved deliberately."""
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<')
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::EventId const & arg0) [constructor]
    cls.add_constructor([param('ns3::EventId const &', 'arg0')])
    ## event-id.h (module 'core'): ns3::EventId::EventId() [constructor]
    cls.add_constructor([])
    ## event-id.h (module 'core'): ns3::EventId::EventId(ns3::Ptr<ns3::EventImpl> const & impl, uint64_t ts, uint32_t context, uint32_t uid) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::EventImpl > const &', 'impl'), param('uint64_t', 'ts'), param('uint32_t', 'context'), param('uint32_t', 'uid')])
    ## event-id.h (module 'core'): void ns3::EventId::Cancel() [member function]
    cls.add_method('Cancel',
                   'void',
                   [])
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetContext() const [member function]
    cls.add_method('GetContext',
                   'uint32_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): uint64_t ns3::EventId::GetTs() const [member function]
    cls.add_method('GetTs',
                   'uint64_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): uint32_t ns3::EventId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint32_t',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsExpired() const [member function]
    cls.add_method('IsExpired',
                   'bool',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): bool ns3::EventId::IsRunning() const [member function]
    cls.add_method('IsRunning',
                   'bool',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): ns3::EventImpl * ns3::EventId::PeekEventImpl() const [member function]
    cls.add_method('PeekEventImpl',
                   'ns3::EventImpl *',
                   [],
                   is_const=True)
    ## event-id.h (module 'core'): void ns3::EventId::Remove() [member function]
    cls.add_method('Remove',
                   'void',
                   [])
    return
def register_Ns3FileHelper_methods(root_module, cls):
    """Register bindings for ns3::FileHelper (file-helper.h, module 'stats')
    on the wrapped class `cls`: constructors, aggregator/probe accessors, the
    Set1dFormat..Set10dFormat family, heading, and WriteProbe.
    NOTE: generated code — registration order is preserved deliberately."""
    ## file-helper.h (module 'stats'): ns3::FileHelper::FileHelper(ns3::FileHelper const & arg0) [constructor]
    cls.add_constructor([param('ns3::FileHelper const &', 'arg0')])
    ## file-helper.h (module 'stats'): ns3::FileHelper::FileHelper() [constructor]
    cls.add_constructor([])
    ## file-helper.h (module 'stats'): ns3::FileHelper::FileHelper(std::string const & outputFileNameWithoutExtension, ns3::FileAggregator::FileType fileType=::ns3::FileAggregator::FileType::SPACE_SEPARATED) [constructor]
    cls.add_constructor([param('std::string const &', 'outputFileNameWithoutExtension'), param('ns3::FileAggregator::FileType', 'fileType', default_value='::ns3::FileAggregator::FileType::SPACE_SEPARATED')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::AddAggregator(std::string const & aggregatorName, std::string const & outputFileName, bool onlyOneAggregator) [member function]
    cls.add_method('AddAggregator',
                   'void',
                   [param('std::string const &', 'aggregatorName'), param('std::string const &', 'outputFileName'), param('bool', 'onlyOneAggregator')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::AddTimeSeriesAdaptor(std::string const & adaptorName) [member function]
    cls.add_method('AddTimeSeriesAdaptor',
                   'void',
                   [param('std::string const &', 'adaptorName')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::ConfigureFile(std::string const & outputFileNameWithoutExtension, ns3::FileAggregator::FileType fileType=::ns3::FileAggregator::FileType::SPACE_SEPARATED) [member function]
    cls.add_method('ConfigureFile',
                   'void',
                   [param('std::string const &', 'outputFileNameWithoutExtension'), param('ns3::FileAggregator::FileType', 'fileType', default_value='::ns3::FileAggregator::FileType::SPACE_SEPARATED')])
    ## file-helper.h (module 'stats'): ns3::Ptr<ns3::FileAggregator> ns3::FileHelper::GetAggregatorMultiple(std::string const & aggregatorName, std::string const & outputFileName) [member function]
    cls.add_method('GetAggregatorMultiple',
                   'ns3::Ptr< ns3::FileAggregator >',
                   [param('std::string const &', 'aggregatorName'), param('std::string const &', 'outputFileName')])
    ## file-helper.h (module 'stats'): ns3::Ptr<ns3::FileAggregator> ns3::FileHelper::GetAggregatorSingle() [member function]
    cls.add_method('GetAggregatorSingle',
                   'ns3::Ptr< ns3::FileAggregator >',
                   [])
    ## file-helper.h (module 'stats'): ns3::Ptr<ns3::Probe> ns3::FileHelper::GetProbe(std::string probeName) const [member function]
    cls.add_method('GetProbe',
                   'ns3::Ptr< ns3::Probe >',
                   [param('std::string', 'probeName')],
                   is_const=True)
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set10dFormat(std::string const & format) [member function]
    cls.add_method('Set10dFormat',
                   'void',
                   [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set1dFormat(std::string const & format) [member function]
    cls.add_method('Set1dFormat',
                   'void',
                   [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set2dFormat(std::string const & format) [member function]
    cls.add_method('Set2dFormat',
                   'void',
                   [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set3dFormat(std::string const & format) [member function]
    cls.add_method('Set3dFormat',
                   'void',
                   [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set4dFormat(std::string const & format) [member function]
    cls.add_method('Set4dFormat',
                   'void',
                   [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set5dFormat(std::string const & format) [member function]
    cls.add_method('Set5dFormat',
                   'void',
                   [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set6dFormat(std::string const & format) [member function]
    cls.add_method('Set6dFormat',
                   'void',
                   [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set7dFormat(std::string const & format) [member function]
    cls.add_method('Set7dFormat',
                   'void',
                   [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set8dFormat(std::string const & format) [member function]
    cls.add_method('Set8dFormat',
                   'void',
                   [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::Set9dFormat(std::string const & format) [member function]
    cls.add_method('Set9dFormat',
                   'void',
                   [param('std::string const &', 'format')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::SetHeading(std::string const & heading) [member function]
    cls.add_method('SetHeading',
                   'void',
                   [param('std::string const &', 'heading')])
    ## file-helper.h (module 'stats'): void ns3::FileHelper::WriteProbe(std::string const & typeId, std::string const & path, std::string const & probeTraceSource) [member function]
    cls.add_method('WriteProbe',
                   'void',
                   [param('std::string const &', 'typeId'), param('std::string const &', 'path'), param('std::string const &', 'probeTraceSource')])
    return
def register_Ns3Gnuplot_methods(root_module, cls):
    """Register bindings for ns3::Gnuplot (gnuplot.h, module 'stats') on the
    wrapped class `cls`: constructors, dataset management, and the two
    GenerateOutput overloads.
    NOTE: generated code — registration order is preserved deliberately."""
    ## gnuplot.h (module 'stats'): ns3::Gnuplot::Gnuplot(ns3::Gnuplot const & arg0) [constructor]
    cls.add_constructor([param('ns3::Gnuplot const &', 'arg0')])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot::Gnuplot(std::string const & outputFilename="", std::string const & title="") [constructor]
    cls.add_constructor([param('std::string const &', 'outputFilename', default_value='""'), param('std::string const &', 'title', default_value='""')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::AddDataset(ns3::GnuplotDataset const & dataset) [member function]
    cls.add_method('AddDataset',
                   'void',
                   [param('ns3::GnuplotDataset const &', 'dataset')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::AppendExtra(std::string const & extra) [member function]
    cls.add_method('AppendExtra',
                   'void',
                   [param('std::string const &', 'extra')])
    ## gnuplot.h (module 'stats'): static std::string ns3::Gnuplot::DetectTerminal(std::string const & filename) [member function]
    cls.add_method('DetectTerminal',
                   'std::string',
                   [param('std::string const &', 'filename')],
                   is_static=True)
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::GenerateOutput(std::ostream & os) [member function]
    cls.add_method('GenerateOutput',
                   'void',
                   [param('std::ostream &', 'os')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::GenerateOutput(std::ostream & osControl, std::ostream & osData, std::string dataFileName) [member function]
    cls.add_method('GenerateOutput',
                   'void',
                   [param('std::ostream &', 'osControl'), param('std::ostream &', 'osData'), param('std::string', 'dataFileName')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetDataFileDatasetIndex(unsigned int index) [member function]
    cls.add_method('SetDataFileDatasetIndex',
                   'void',
                   [param('unsigned int', 'index')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetExtra(std::string const & extra) [member function]
    cls.add_method('SetExtra',
                   'void',
                   [param('std::string const &', 'extra')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetLegend(std::string const & xLegend, std::string const & yLegend) [member function]
    cls.add_method('SetLegend',
                   'void',
                   [param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetOutputFilename(std::string const & outputFilename) [member function]
    cls.add_method('SetOutputFilename',
                   'void',
                   [param('std::string const &', 'outputFilename')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetTerminal(std::string const & terminal) [member function]
    cls.add_method('SetTerminal',
                   'void',
                   [param('std::string const &', 'terminal')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot::SetTitle(std::string const & title) [member function]
    cls.add_method('SetTitle',
                   'void',
                   [param('std::string const &', 'title')])
    return
def register_Ns3GnuplotCollection_methods(root_module, cls):
    """Register bindings for ns3::GnuplotCollection (gnuplot.h, module 'stats')
    on the wrapped class `cls`: constructors, AddPlot/GetPlot, the two
    GenerateOutput overloads, and SetTerminal.
    NOTE: generated code — registration order is preserved deliberately."""
    ## gnuplot.h (module 'stats'): ns3::GnuplotCollection::GnuplotCollection(ns3::GnuplotCollection const & arg0) [constructor]
    cls.add_constructor([param('ns3::GnuplotCollection const &', 'arg0')])
    ## gnuplot.h (module 'stats'): ns3::GnuplotCollection::GnuplotCollection(std::string const & outputFilename) [constructor]
    cls.add_constructor([param('std::string const &', 'outputFilename')])
    ## gnuplot.h (module 'stats'): void ns3::GnuplotCollection::AddPlot(ns3::Gnuplot const & plot) [member function]
    cls.add_method('AddPlot',
                   'void',
                   [param('ns3::Gnuplot const &', 'plot')])
    ## gnuplot.h (module 'stats'): void ns3::GnuplotCollection::GenerateOutput(std::ostream & os) [member function]
    cls.add_method('GenerateOutput',
                   'void',
                   [param('std::ostream &', 'os')])
    ## gnuplot.h (module 'stats'): void ns3::GnuplotCollection::GenerateOutput(std::ostream & osControl, std::ostream & osData, std::string dataFileName) [member function]
    cls.add_method('GenerateOutput',
                   'void',
                   [param('std::ostream &', 'osControl'), param('std::ostream &', 'osData'), param('std::string', 'dataFileName')])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot & ns3::GnuplotCollection::GetPlot(unsigned int id) [member function]
    cls.add_method('GetPlot',
                   'ns3::Gnuplot &',
                   [param('unsigned int', 'id')])
    ## gnuplot.h (module 'stats'): void ns3::GnuplotCollection::SetTerminal(std::string const & terminal) [member function]
    cls.add_method('SetTerminal',
                   'void',
                   [param('std::string const &', 'terminal')])
    return
def register_Ns3GnuplotDataset_methods(root_module, cls):
    """Register bindings for ns3::GnuplotDataset (gnuplot.h, module 'stats')
    on the wrapped class `cls`: copy constructor, extra/title setters, and the
    protected Data* constructor.
    NOTE: generated code — registration order is preserved deliberately."""
    ## gnuplot.h (module 'stats'): ns3::GnuplotDataset::GnuplotDataset(ns3::GnuplotDataset const & original) [constructor]
    cls.add_constructor([param('ns3::GnuplotDataset const &', 'original')])
    ## gnuplot.h (module 'stats'): static void ns3::GnuplotDataset::SetDefaultExtra(std::string const & extra) [member function]
    cls.add_method('SetDefaultExtra',
                   'void',
                   [param('std::string const &', 'extra')],
                   is_static=True)
    ## gnuplot.h (module 'stats'): void ns3::GnuplotDataset::SetExtra(std::string const & extra) [member function]
    cls.add_method('SetExtra',
                   'void',
                   [param('std::string const &', 'extra')])
    ## gnuplot.h (module 'stats'): void ns3::GnuplotDataset::SetTitle(std::string const & title) [member function]
    cls.add_method('SetTitle',
                   'void',
                   [param('std::string const &', 'title')])
    ## gnuplot.h (module 'stats'): ns3::GnuplotDataset::GnuplotDataset(ns3::GnuplotDataset::Data * data) [constructor]
    cls.add_constructor([param('ns3::GnuplotDataset::Data *', 'data')],
                        visibility='protected')
    return
def register_Ns3GnuplotHelper_methods(root_module, cls):
    """Register bindings for ns3::GnuplotHelper (gnuplot-helper.h, module
    'stats') on the wrapped class `cls`: constructors, plot configuration,
    aggregator/probe accessors, and PlotProbe.
    NOTE: generated code — registration order is preserved deliberately."""
    ## gnuplot-helper.h (module 'stats'): ns3::GnuplotHelper::GnuplotHelper(ns3::GnuplotHelper const & arg0) [constructor]
    cls.add_constructor([param('ns3::GnuplotHelper const &', 'arg0')])
    ## gnuplot-helper.h (module 'stats'): ns3::GnuplotHelper::GnuplotHelper() [constructor]
    cls.add_constructor([])
    ## gnuplot-helper.h (module 'stats'): ns3::GnuplotHelper::GnuplotHelper(std::string const & outputFileNameWithoutExtension, std::string const & title, std::string const & xLegend, std::string const & yLegend, std::string const & terminalType="png") [constructor]
    cls.add_constructor([param('std::string const &', 'outputFileNameWithoutExtension'), param('std::string const &', 'title'), param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend'), param('std::string const &', 'terminalType', default_value='"png"')])
    ## gnuplot-helper.h (module 'stats'): void ns3::GnuplotHelper::AddTimeSeriesAdaptor(std::string const & adaptorName) [member function]
    cls.add_method('AddTimeSeriesAdaptor',
                   'void',
                   [param('std::string const &', 'adaptorName')])
    ## gnuplot-helper.h (module 'stats'): void ns3::GnuplotHelper::ConfigurePlot(std::string const & outputFileNameWithoutExtension, std::string const & title, std::string const & xLegend, std::string const & yLegend, std::string const & terminalType="png") [member function]
    cls.add_method('ConfigurePlot',
                   'void',
                   [param('std::string const &', 'outputFileNameWithoutExtension'), param('std::string const &', 'title'), param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend'), param('std::string const &', 'terminalType', default_value='"png"')])
    ## gnuplot-helper.h (module 'stats'): ns3::Ptr<ns3::GnuplotAggregator> ns3::GnuplotHelper::GetAggregator() [member function]
    cls.add_method('GetAggregator',
                   'ns3::Ptr< ns3::GnuplotAggregator >',
                   [])
    ## gnuplot-helper.h (module 'stats'): ns3::Ptr<ns3::Probe> ns3::GnuplotHelper::GetProbe(std::string probeName) const [member function]
    cls.add_method('GetProbe',
                   'ns3::Ptr< ns3::Probe >',
                   [param('std::string', 'probeName')],
                   is_const=True)
    ## gnuplot-helper.h (module 'stats'): void ns3::GnuplotHelper::PlotProbe(std::string const & typeId, std::string const & path, std::string const & probeTraceSource, std::string const & title, ns3::GnuplotAggregator::KeyLocation keyLocation=::ns3::GnuplotAggregator::KeyLocation::KEY_INSIDE) [member function]
    cls.add_method('PlotProbe',
                   'void',
                   [param('std::string const &', 'typeId'), param('std::string const &', 'path'), param('std::string const &', 'probeTraceSource'), param('std::string const &', 'title'), param('ns3::GnuplotAggregator::KeyLocation', 'keyLocation', default_value='::ns3::GnuplotAggregator::KeyLocation::KEY_INSIDE')])
    return
def register_Ns3Hasher_methods(root_module, cls):
    """Register bindings for ns3::Hasher (hash.h, module 'core') on the
    wrapped class `cls`: constructors, the overloaded GetHash32/GetHash64
    pairs (buffer+size and std::string forms), and clear().
    NOTE: generated code — registration order is preserved deliberately."""
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
    ## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
    cls.add_constructor([])
    ## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
    cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')])
    ## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')])
    ## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('std::string const', 's')])
    ## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
    cls.add_method('clear',
                   'ns3::Hasher &',
                   [])
    return
def register_Ns3Histogram_methods(root_module, cls):
    """Register bindings for ns3::Histogram (histogram.h, module 'stats') on
    the wrapped class `cls`: constructors, bin accessors, XML serialization,
    and SetDefaultBinWidth.
    NOTE: generated code — registration order is preserved deliberately."""
    ## histogram.h (module 'stats'): ns3::Histogram::Histogram(ns3::Histogram const & arg0) [constructor]
    cls.add_constructor([param('ns3::Histogram const &', 'arg0')])
    ## histogram.h (module 'stats'): ns3::Histogram::Histogram(double binWidth) [constructor]
    cls.add_constructor([param('double', 'binWidth')])
    ## histogram.h (module 'stats'): ns3::Histogram::Histogram() [constructor]
    cls.add_constructor([])
    ## histogram.h (module 'stats'): void ns3::Histogram::AddValue(double value) [member function]
    cls.add_method('AddValue',
                   'void',
                   [param('double', 'value')])
    ## histogram.h (module 'stats'): uint32_t ns3::Histogram::GetBinCount(uint32_t index) [member function]
    cls.add_method('GetBinCount',
                   'uint32_t',
                   [param('uint32_t', 'index')])
    ## histogram.h (module 'stats'): double ns3::Histogram::GetBinEnd(uint32_t index) [member function]
    cls.add_method('GetBinEnd',
                   'double',
                   [param('uint32_t', 'index')])
    ## histogram.h (module 'stats'): double ns3::Histogram::GetBinStart(uint32_t index) [member function]
    cls.add_method('GetBinStart',
                   'double',
                   [param('uint32_t', 'index')])
    ## histogram.h (module 'stats'): double ns3::Histogram::GetBinWidth(uint32_t index) const [member function]
    cls.add_method('GetBinWidth',
                   'double',
                   [param('uint32_t', 'index')],
                   is_const=True)
    ## histogram.h (module 'stats'): uint32_t ns3::Histogram::GetNBins() const [member function]
    cls.add_method('GetNBins',
                   'uint32_t',
                   [],
                   is_const=True)
    ## histogram.h (module 'stats'): void ns3::Histogram::SerializeToXmlStream(std::ostream & os, uint16_t indent, std::string elementName) const [member function]
    cls.add_method('SerializeToXmlStream',
                   'void',
                   [param('std::ostream &', 'os'), param('uint16_t', 'indent'), param('std::string', 'elementName')],
                   is_const=True)
    ## histogram.h (module 'stats'): void ns3::Histogram::SetDefaultBinWidth(double binWidth) [member function]
    cls.add_method('SetDefaultBinWidth',
                   'void',
                   [param('double', 'binWidth')])
    return
def register_Ns3LogComponent_methods(root_module, cls):
    """Register bindings for ns3::LogComponent (log.h, module 'core') on the
    wrapped class `cls`: constructors, level enable/disable, and static
    component-list/level-label helpers.
    NOTE: generated code — registration order is preserved deliberately."""
    ## log.h (module 'core'): ns3::LogComponent::LogComponent(ns3::LogComponent const & arg0) [constructor]
    cls.add_constructor([param('ns3::LogComponent const &', 'arg0')])
    ## log.h (module 'core'): ns3::LogComponent::LogComponent(std::string const & name, std::string const & file, ns3::LogLevel const mask=::ns3::LogLevel::LOG_NONE) [constructor]
    cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'file'), param('ns3::LogLevel const', 'mask', default_value='::ns3::LogLevel::LOG_NONE')])
    ## log.h (module 'core'): void ns3::LogComponent::Disable(ns3::LogLevel const level) [member function]
    cls.add_method('Disable',
                   'void',
                   [param('ns3::LogLevel const', 'level')])
    ## log.h (module 'core'): void ns3::LogComponent::Enable(ns3::LogLevel const level) [member function]
    cls.add_method('Enable',
                   'void',
                   [param('ns3::LogLevel const', 'level')])
    ## log.h (module 'core'): std::string ns3::LogComponent::File() const [member function]
    cls.add_method('File',
                   'std::string',
                   [],
                   is_const=True)
    ## log.h (module 'core'): static ns3::LogComponent::ComponentList * ns3::LogComponent::GetComponentList() [member function]
    cls.add_method('GetComponentList',
                   'ns3::LogComponent::ComponentList *',
                   [],
                   is_static=True)
    ## log.h (module 'core'): static std::string ns3::LogComponent::GetLevelLabel(ns3::LogLevel const level) [member function]
    cls.add_method('GetLevelLabel',
                   'std::string',
                   [param('ns3::LogLevel const', 'level')],
                   is_static=True)
    ## log.h (module 'core'): bool ns3::LogComponent::IsEnabled(ns3::LogLevel const level) const [member function]
    cls.add_method('IsEnabled',
                   'bool',
                   [param('ns3::LogLevel const', 'level')],
                   is_const=True)
    ## log.h (module 'core'): bool ns3::LogComponent::IsNoneEnabled() const [member function]
    cls.add_method('IsNoneEnabled',
                   'bool',
                   [],
                   is_const=True)
    ## log.h (module 'core'): char const * ns3::LogComponent::Name() const [member function]
    cls.add_method('Name',
                   'char const *',
                   [],
                   is_const=True)
    ## log.h (module 'core'): void ns3::LogComponent::SetMask(ns3::LogLevel const level) [member function]
    cls.add_method('SetMask',
                   'void',
                   [param('ns3::LogLevel const', 'level')])
    return
def register_Ns3ObjectBase_methods(root_module, cls):
    """Register bindings for ns3::ObjectBase (object-base.h, module 'core')
    on the wrapped class `cls`: constructors, attribute get/set, trace
    connect/disconnect, and the protected construction hooks. Flags such as
    is_pure_virtual/is_virtual/visibility mirror the C++ declarations.
    NOTE: generated code — registration order is preserved deliberately."""
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
    cls.add_constructor([])
    ## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [constructor]
    cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
    cls.add_method('GetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
                   is_const=True)
    ## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttribute',
                   'void',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
    cls.add_method('SetAttributeFailSafe',
                   'bool',
                   [param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceConnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnect',
                   'bool',
                   [param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
    cls.add_method('TraceDisconnectWithoutContext',
                   'bool',
                   [param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
    ## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
    cls.add_method('ConstructSelf',
                   'void',
                   [param('ns3::AttributeConstructionList const &', 'attributes')],
                   visibility='protected')
    ## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
    cls.add_method('NotifyConstructionCompleted',
                   'void',
                   [],
                   is_virtual=True, visibility='protected')
    return
def register_Ns3ObjectDeleter_methods(root_module, cls):
    """Bind ns3::ObjectDeleter (object.h, module 'core'): default/copy
    constructors and the static Delete() helper."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
    # static void Delete(ns3::Object *object)
    cls.add_method(
        'Delete', 'void',
        [param('ns3::Object *', 'object')],
        is_static=True)
def register_Ns3ObjectFactory_methods(root_module, cls):
    """Register bindings for ns3::ObjectFactory (object-factory.h, module
    'core') on the wrapped class `cls`: output-stream operator, constructors,
    Create/GetTypeId/IsTypeIdSet, Set, and the three SetTypeId overloads.
    NOTE: generated code — registration order is preserved deliberately."""
    cls.add_output_stream_operator()
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [constructor]
    cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
    cls.add_constructor([])
    ## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string const & typeId) [constructor]
    cls.add_constructor([param('std::string const &', 'typeId')])
    ## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::Object >',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): bool ns3::ObjectFactory::IsTypeIdSet() const [member function]
    cls.add_method('IsTypeIdSet',
                   'bool',
                   [],
                   is_const=True)
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::Set() [member function]
    cls.add_method('Set',
                   'void',
                   [])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('ns3::TypeId', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('char const *', 'tid')])
    ## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
    cls.add_method('SetTypeId',
                   'void',
                   [param('std::string', 'tid')])
    return
def register_Ns3ParameterLogger_methods(root_module, cls):
    """Bind ns3::ParameterLogger (log.h, module 'core'): copy constructor and
    the std::ostream constructor."""
    # Copy constructor.
    cls.add_constructor([param('ns3::ParameterLogger const &', 'arg0')])
    # ParameterLogger(std::ostream &os)
    cls.add_constructor([param('std::ostream &', 'os')])
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
    """Bind ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>
    (simple-ref-count.h, module 'core'): default and copy constructors."""
    # Default constructor.
    cls.add_constructor([])
    # Copy constructor.
    cls.add_constructor(
        [param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
def register_Ns3Simulator_methods(root_module, cls):
    """Register the Python bindings for ns3::Simulator (simulator.h, module 'core').

    Auto-generated by the ns-3 API scanner: registers the copy constructor and
    the static scheduling/query API (Cancel, Destroy, Now, Stop, ...) on the
    pybindgen class wrapper *cls*.  Every member here is static, matching the
    all-static C++ Simulator facade.
    """
    ## simulator.h (module 'core'): ns3::Simulator::Simulator(ns3::Simulator const & arg0) [constructor]
    cls.add_constructor([param('ns3::Simulator const &', 'arg0')])
    ## simulator.h (module 'core'): static void ns3::Simulator::Cancel(ns3::EventId const & id) [member function]
    cls.add_method('Cancel',
                   'void',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Destroy() [member function]
    cls.add_method('Destroy',
                   'void',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetContext() [member function]
    cls.add_method('GetContext',
                   'uint32_t',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetDelayLeft(ns3::EventId const & id) [member function]
    cls.add_method('GetDelayLeft',
                   'ns3::Time',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static uint64_t ns3::Simulator::GetEventCount() [member function]
    cls.add_method('GetEventCount',
                   'uint64_t',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Ptr<ns3::SimulatorImpl> ns3::Simulator::GetImplementation() [member function]
    cls.add_method('GetImplementation',
                   'ns3::Ptr< ns3::SimulatorImpl >',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::GetMaximumSimulationTime() [member function]
    cls.add_method('GetMaximumSimulationTime',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static uint32_t ns3::Simulator::GetSystemId() [member function]
    cls.add_method('GetSystemId',
                   'uint32_t',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsExpired(ns3::EventId const & id) [member function]
    cls.add_method('IsExpired',
                   'bool',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static bool ns3::Simulator::IsFinished() [member function]
    cls.add_method('IsFinished',
                   'bool',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static ns3::Time ns3::Simulator::Now() [member function]
    cls.add_method('Now',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Remove(ns3::EventId const & id) [member function]
    cls.add_method('Remove',
                   'void',
                   [param('ns3::EventId const &', 'id')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetImplementation(ns3::Ptr<ns3::SimulatorImpl> impl) [member function]
    cls.add_method('SetImplementation',
                   'void',
                   [param('ns3::Ptr< ns3::SimulatorImpl >', 'impl')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::SetScheduler(ns3::ObjectFactory schedulerFactory) [member function]
    cls.add_method('SetScheduler',
                   'void',
                   [param('ns3::ObjectFactory', 'schedulerFactory')],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop() [member function]
    cls.add_method('Stop',
                   'void',
                   [],
                   is_static=True)
    ## simulator.h (module 'core'): static void ns3::Simulator::Stop(ns3::Time const & delay) [member function]
    cls.add_method('Stop',
                   'void',
                   [param('ns3::Time const &', 'delay')],
                   is_static=True)
    return
def register_Ns3StatisticalSummary_methods(root_module, cls):
    """Register the Python bindings for ns3::StatisticalSummary (data-calculator.h, module 'stats').

    Auto-generated: registers the constructors and the pure-virtual accessor
    interface (getCount/getMax/getMean/...).  All accessors are registered
    const + pure-virtual + virtual, matching the abstract C++ interface.
    """
    ## data-calculator.h (module 'stats'): ns3::StatisticalSummary::StatisticalSummary() [constructor]
    cls.add_constructor([])
    ## data-calculator.h (module 'stats'): ns3::StatisticalSummary::StatisticalSummary(ns3::StatisticalSummary const & arg0) [constructor]
    cls.add_constructor([param('ns3::StatisticalSummary const &', 'arg0')])
    ## data-calculator.h (module 'stats'): long int ns3::StatisticalSummary::getCount() const [member function]
    cls.add_method('getCount',
                   'long int',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMax() const [member function]
    cls.add_method('getMax',
                   'double',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMean() const [member function]
    cls.add_method('getMean',
                   'double',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getMin() const [member function]
    cls.add_method('getMin',
                   'double',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getSqrSum() const [member function]
    cls.add_method('getSqrSum',
                   'double',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getStddev() const [member function]
    cls.add_method('getStddev',
                   'double',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getSum() const [member function]
    cls.add_method('getSum',
                   'double',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## data-calculator.h (module 'stats'): double ns3::StatisticalSummary::getVariance() const [member function]
    cls.add_method('getVariance',
                   'double',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3Time_methods(root_module, cls):
    """Register the Python bindings for ns3::Time (nstime.h, module 'core').

    Auto-generated by the ns-3 API scanner.  Registers, in order:
    comparison operators, arithmetic operators against Time and int64x64_t,
    in-place operators, the output-stream operator, the many numeric/string
    constructors, and the full accessor/conversion member API.
    """
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('>=')
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('>')
    cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right'))
    # Time / Time yields a dimensionless int64x64_t; Time / int64x64_t yields Time.
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::Time'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', 'right'))
    cls.add_output_stream_operator()
    ## nstime.h (module 'core'): ns3::Time::Time() [constructor]
    cls.add_constructor([])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [constructor]
    cls.add_constructor([param('ns3::Time const &', 'o')])
    ## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
    cls.add_constructor([param('double', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
    cls.add_constructor([param('int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
    cls.add_constructor([param('long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
    cls.add_constructor([param('long long int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
    cls.add_constructor([param('unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
    cls.add_constructor([param('long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
    cls.add_constructor([param('long long unsigned int', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
    ## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
    cls.add_constructor([param('std::string const &', 's')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit=::ns3::Time::Unit::AUTO) const [member function]
    cls.add_method('As',
                   'ns3::TimeWithUnit',
                   [param('ns3::Time::Unit const', 'unit', default_value='::ns3::Time::Unit::AUTO')],
                   is_const=True)
    ## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
    cls.add_method('Compare',
                   'int',
                   [param('ns3::Time const &', 'o')],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
    cls.add_method('From',
                   'ns3::Time',
                   [param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
    cls.add_method('FromDouble',
                   'ns3::Time',
                   [param('double', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
    cls.add_method('FromInteger',
                   'ns3::Time',
                   [param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
    cls.add_method('GetDays',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
    cls.add_method('GetFemtoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
    cls.add_method('GetHours',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
    cls.add_method('GetInteger',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
    cls.add_method('GetMicroSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
    cls.add_method('GetMilliSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
    cls.add_method('GetMinutes',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
    cls.add_method('GetNanoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
    cls.add_method('GetPicoSeconds',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
    cls.add_method('GetResolution',
                   'ns3::Time::Unit',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
    cls.add_method('GetSeconds',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
    cls.add_method('GetTimeStep',
                   'int64_t',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
    cls.add_method('GetYears',
                   'double',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
    cls.add_method('IsNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
    cls.add_method('IsPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
    cls.add_method('IsStrictlyNegative',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
    cls.add_method('IsStrictlyPositive',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
    cls.add_method('IsZero',
                   'bool',
                   [],
                   is_const=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
    cls.add_method('Max',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
    cls.add_method('Min',
                   'ns3::Time',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): ns3::Time ns3::Time::RoundTo(ns3::Time::Unit unit) const [member function]
    cls.add_method('RoundTo',
                   'ns3::Time',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
    cls.add_method('SetResolution',
                   'void',
                   [param('ns3::Time::Unit', 'resolution')],
                   is_static=True)
    ## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
    cls.add_method('StaticInit',
                   'bool',
                   [],
                   is_static=True)
    ## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
    cls.add_method('To',
                   'ns3::int64x64_t',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
    cls.add_method('ToDouble',
                   'double',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    ## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
    cls.add_method('ToInteger',
                   'int64_t',
                   [param('ns3::Time::Unit', 'unit')],
                   is_const=True)
    return
def register_Ns3TimeWithUnit_methods(root_module, cls):
    """Register the Python bindings for ns3::TimeWithUnit (nstime.h, module 'core').

    Auto-generated: registers the output-stream operator, the copy
    constructor, and the (Time, Unit) constructor.
    """
    cls.add_output_stream_operator()
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [constructor]
    cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
    ## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
    cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
    return
def register_Ns3TracedValue__Bool_methods(root_module, cls):
    """Register bindings for the TracedValue<bool> instantiation (traced-value.h, module 'core').

    Auto-generated: registers constructors, trace (dis)connect methods, and
    Get/Set accessors on the pybindgen class wrapper *cls*.
    """
    ## traced-value.h (module 'core'): ns3::TracedValue<bool>::TracedValue() [constructor]
    cls.add_constructor([])
    ## traced-value.h (module 'core'): ns3::TracedValue<bool>::TracedValue(ns3::TracedValue<bool> const & o) [constructor]
    cls.add_constructor([param('ns3::TracedValue< bool > const &', 'o')])
    ## traced-value.h (module 'core'): ns3::TracedValue<bool>::TracedValue(bool const & v) [constructor]
    cls.add_constructor([param('bool const &', 'v')])
    ## traced-value.h (module 'core'): ns3::TracedValue<bool>::TracedValue(ns3::TracedValue<bool> const & other) [constructor]
    cls.add_constructor([param('ns3::TracedValue< bool > const &', 'other')])
    ## traced-value.h (module 'core'): ns3::TracedValue<bool>::TracedValue(ns3::TracedValue<bool> const & other) [constructor]
    # NOTE(review): this registration duplicates the one above — emitted as-is
    # by the API scanner; appears redundant but is left untouched.
    cls.add_constructor([param('ns3::TracedValue< bool > const &', 'other')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<bool>::Connect(ns3::CallbackBase const & cb, std::string path) [member function]
    cls.add_method('Connect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<bool>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('ConnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<bool>::Disconnect(ns3::CallbackBase const & cb, std::string path) [member function]
    cls.add_method('Disconnect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<bool>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('DisconnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): bool ns3::TracedValue<bool>::Get() const [member function]
    cls.add_method('Get',
                   'bool',
                   [],
                   is_const=True)
    ## traced-value.h (module 'core'): void ns3::TracedValue<bool>::Set(bool const & v) [member function]
    cls.add_method('Set',
                   'void',
                   [param('bool const &', 'v')])
    return
def register_Ns3TracedValue__Double_methods(root_module, cls):
    """Register bindings for the TracedValue<double> instantiation (traced-value.h, module 'core').

    Auto-generated: registers constructors, trace (dis)connect methods, and
    Get/Set accessors on the pybindgen class wrapper *cls*.
    """
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue() [constructor]
    cls.add_constructor([])
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(ns3::TracedValue<double> const & o) [constructor]
    cls.add_constructor([param('ns3::TracedValue< double > const &', 'o')])
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(double const & v) [constructor]
    cls.add_constructor([param('double const &', 'v')])
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(ns3::TracedValue<double> const & other) [constructor]
    cls.add_constructor([param('ns3::TracedValue< double > const &', 'other')])
    ## traced-value.h (module 'core'): ns3::TracedValue<double>::TracedValue(ns3::TracedValue<double> const & other) [constructor]
    # NOTE(review): duplicate of the registration above — emitted as-is by the
    # API scanner; appears redundant but is left untouched.
    cls.add_constructor([param('ns3::TracedValue< double > const &', 'other')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Connect(ns3::CallbackBase const & cb, std::string path) [member function]
    cls.add_method('Connect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('ConnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Disconnect(ns3::CallbackBase const & cb, std::string path) [member function]
    cls.add_method('Disconnect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('DisconnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): double ns3::TracedValue<double>::Get() const [member function]
    cls.add_method('Get',
                   'double',
                   [],
                   is_const=True)
    ## traced-value.h (module 'core'): void ns3::TracedValue<double>::Set(double const & v) [member function]
    cls.add_method('Set',
                   'void',
                   [param('double const &', 'v')])
    return
def register_Ns3TracedValue__Unsigned_char_methods(root_module, cls):
    """Register bindings for the TracedValue<unsigned char> instantiation (traced-value.h, module 'core').

    Auto-generated: registers constructors, trace (dis)connect methods, and
    Get/Set accessors on the pybindgen class wrapper *cls*.
    """
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char>::TracedValue() [constructor]
    cls.add_constructor([])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char>::TracedValue(ns3::TracedValue<unsigned char> const & o) [constructor]
    cls.add_constructor([param('ns3::TracedValue< unsigned char > const &', 'o')])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char>::TracedValue(unsigned char const & v) [constructor]
    cls.add_constructor([param('unsigned char const &', 'v')])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char>::TracedValue(ns3::TracedValue<unsigned char> const & other) [constructor]
    cls.add_constructor([param('ns3::TracedValue< unsigned char > const &', 'other')])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned char>::TracedValue(ns3::TracedValue<unsigned char> const & other) [constructor]
    # NOTE(review): duplicate of the registration above — emitted as-is by the
    # API scanner; appears redundant but is left untouched.
    cls.add_constructor([param('ns3::TracedValue< unsigned char > const &', 'other')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned char>::Connect(ns3::CallbackBase const & cb, std::string path) [member function]
    cls.add_method('Connect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned char>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('ConnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned char>::Disconnect(ns3::CallbackBase const & cb, std::string path) [member function]
    cls.add_method('Disconnect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned char>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('DisconnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): unsigned char ns3::TracedValue<unsigned char>::Get() const [member function]
    cls.add_method('Get',
                   'unsigned char',
                   [],
                   is_const=True)
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned char>::Set(unsigned char const & v) [member function]
    cls.add_method('Set',
                   'void',
                   [param('unsigned char const &', 'v')])
    return
def register_Ns3TracedValue__Unsigned_int_methods(root_module, cls):
    """Register bindings for the TracedValue<unsigned int> instantiation (traced-value.h, module 'core').

    Auto-generated: registers constructors, trace (dis)connect methods, and
    Get/Set accessors on the pybindgen class wrapper *cls*.
    """
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int>::TracedValue() [constructor]
    cls.add_constructor([])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int>::TracedValue(ns3::TracedValue<unsigned int> const & o) [constructor]
    cls.add_constructor([param('ns3::TracedValue< unsigned int > const &', 'o')])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int>::TracedValue(unsigned int const & v) [constructor]
    cls.add_constructor([param('unsigned int const &', 'v')])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int>::TracedValue(ns3::TracedValue<unsigned int> const & other) [constructor]
    cls.add_constructor([param('ns3::TracedValue< unsigned int > const &', 'other')])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned int>::TracedValue(ns3::TracedValue<unsigned int> const & other) [constructor]
    # NOTE(review): duplicate of the registration above — emitted as-is by the
    # API scanner; appears redundant but is left untouched.
    cls.add_constructor([param('ns3::TracedValue< unsigned int > const &', 'other')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned int>::Connect(ns3::CallbackBase const & cb, std::string path) [member function]
    cls.add_method('Connect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned int>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('ConnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned int>::Disconnect(ns3::CallbackBase const & cb, std::string path) [member function]
    cls.add_method('Disconnect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned int>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('DisconnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): unsigned int ns3::TracedValue<unsigned int>::Get() const [member function]
    cls.add_method('Get',
                   'unsigned int',
                   [],
                   is_const=True)
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned int>::Set(unsigned int const & v) [member function]
    cls.add_method('Set',
                   'void',
                   [param('unsigned int const &', 'v')])
    return
def register_Ns3TracedValue__Unsigned_short_methods(root_module, cls):
    """Register bindings for the TracedValue<unsigned short> instantiation (traced-value.h, module 'core').

    Auto-generated: registers constructors, trace (dis)connect methods, and
    Get/Set accessors on the pybindgen class wrapper *cls*.
    """
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned short>::TracedValue() [constructor]
    cls.add_constructor([])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned short>::TracedValue(ns3::TracedValue<unsigned short> const & o) [constructor]
    cls.add_constructor([param('ns3::TracedValue< unsigned short > const &', 'o')])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned short>::TracedValue(short unsigned int const & v) [constructor]
    cls.add_constructor([param('short unsigned int const &', 'v')])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned short>::TracedValue(ns3::TracedValue<unsigned short> const & other) [constructor]
    cls.add_constructor([param('ns3::TracedValue< unsigned short > const &', 'other')])
    ## traced-value.h (module 'core'): ns3::TracedValue<unsigned short>::TracedValue(ns3::TracedValue<unsigned short> const & other) [constructor]
    # NOTE(review): duplicate of the registration above — emitted as-is by the
    # API scanner; appears redundant but is left untouched.
    cls.add_constructor([param('ns3::TracedValue< unsigned short > const &', 'other')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned short>::Connect(ns3::CallbackBase const & cb, std::string path) [member function]
    cls.add_method('Connect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned short>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('ConnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned short>::Disconnect(ns3::CallbackBase const & cb, std::string path) [member function]
    cls.add_method('Disconnect',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned short>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
    cls.add_method('DisconnectWithoutContext',
                   'void',
                   [param('ns3::CallbackBase const &', 'cb')])
    ## traced-value.h (module 'core'): short unsigned int ns3::TracedValue<unsigned short>::Get() const [member function]
    cls.add_method('Get',
                   'short unsigned int',
                   [],
                   is_const=True)
    ## traced-value.h (module 'core'): void ns3::TracedValue<unsigned short>::Set(short unsigned int const & v) [member function]
    cls.add_method('Set',
                   'void',
                   [param('short unsigned int const &', 'v')])
    return
def register_Ns3TypeId_methods(root_module, cls):
    """Register the Python bindings for ns3::TypeId (type-id.h, module 'core').

    Auto-generated by the ns-3 API scanner: registers comparison/stream
    operators, constructors, and the full TypeId introspection API
    (attribute/trace-source enumeration, lookup by name/hash, parent chain,
    and the fluent Set* builders used at TypeId registration time).
    """
    cls.add_binary_comparison_operator('==')
    cls.add_binary_comparison_operator('!=')
    cls.add_output_stream_operator()
    cls.add_binary_comparison_operator('<')
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
    cls.add_constructor([param('char const *', 'name')])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [constructor]
    cls.add_constructor([param('ns3::TypeId const &', 'o')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<const ns3::AttributeAccessor> accessor, ns3::Ptr<const ns3::AttributeChecker> checker, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddAttribute',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<const ns3::TraceSourceAccessor> accessor, std::string callback, ns3::TypeId::SupportLevel supportLevel=::ns3::TypeId::SupportLevel::SUPPORTED, std::string const & supportMsg="") [member function]
    cls.add_method('AddTraceSource',
                   'ns3::TypeId',
                   [param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback'), param('ns3::TypeId::SupportLevel', 'supportLevel', default_value='::ns3::TypeId::SupportLevel::SUPPORTED'), param('std::string const &', 'supportMsg', default_value='""')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(std::size_t i) const [member function]
    cls.add_method('GetAttribute',
                   'ns3::TypeId::AttributeInformation',
                   [param('std::size_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(std::size_t i) const [member function]
    cls.add_method('GetAttributeFullName',
                   'std::string',
                   [param('std::size_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetAttributeN() const [member function]
    cls.add_method('GetAttributeN',
                   'std::size_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::TypeId::GetConstructor() const [member function]
    cls.add_method('GetConstructor',
                   'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
    cls.add_method('GetGroupName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::hash_t ns3::TypeId::GetHash() const [member function]
    cls.add_method('GetHash',
                   'ns3::TypeId::hash_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
    cls.add_method('GetName',
                   'std::string',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
    cls.add_method('GetParent',
                   'ns3::TypeId',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint16_t i) [member function]
    cls.add_method('GetRegistered',
                   'ns3::TypeId',
                   [param('uint16_t', 'i')],
                   is_static=True)
    ## type-id.h (module 'core'): static uint16_t ns3::TypeId::GetRegisteredN() [member function]
    cls.add_method('GetRegisteredN',
                   'uint16_t',
                   [],
                   is_static=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
    cls.add_method('GetSize',
                   'std::size_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(std::size_t i) const [member function]
    cls.add_method('GetTraceSource',
                   'ns3::TypeId::TraceSourceInformation',
                   [param('std::size_t', 'i')],
                   is_const=True)
    ## type-id.h (module 'core'): std::size_t ns3::TypeId::GetTraceSourceN() const [member function]
    cls.add_method('GetTraceSourceN',
                   'std::size_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
    cls.add_method('GetUid',
                   'uint16_t',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
    cls.add_method('HasConstructor',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
    cls.add_method('HasParent',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
    cls.add_method('HideFromDocumentation',
                   'ns3::TypeId',
                   [])
    ## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
    cls.add_method('IsChildOf',
                   'bool',
                   [param('ns3::TypeId', 'other')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
    cls.add_method('LookupAttributeByName',
                   'bool',
                   [param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
                   is_const=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(ns3::TypeId::hash_t hash) [member function]
    # NOTE(review): the C++ signature uses ns3::TypeId::hash_t but the binding
    # declares 'uint32_t' — presumably hash_t is a uint32_t typedef; emitted
    # as-is by the scanner.
    cls.add_method('LookupByHash',
                   'ns3::TypeId',
                   [param('uint32_t', 'hash')],
                   is_static=True)
    ## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(ns3::TypeId::hash_t hash, ns3::TypeId * tid) [member function]
    cls.add_method('LookupByHashFailSafe',
                   'bool',
                   [param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
                   is_static=True)
    ## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
    cls.add_method('LookupByName',
                   'ns3::TypeId',
                   [param('std::string', 'name')],
                   is_static=True)
    ## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name')],
                   is_const=True)
    ## type-id.h (module 'core'): ns3::Ptr<const ns3::TraceSourceAccessor> ns3::TypeId::LookupTraceSourceByName(std::string name, ns3::TypeId::TraceSourceInformation * info) const [member function]
    cls.add_method('LookupTraceSourceByName',
                   'ns3::Ptr< ns3::TraceSourceAccessor const >',
                   [param('std::string', 'name'), param('ns3::TypeId::TraceSourceInformation *', 'info')],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
    cls.add_method('MustHideFromDocumentation',
                   'bool',
                   [],
                   is_const=True)
    ## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(std::size_t i, ns3::Ptr<const ns3::AttributeValue> initialValue) [member function]
    cls.add_method('SetAttributeInitialValue',
                   'bool',
                   [param('std::size_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
    cls.add_method('SetGroupName',
                   'ns3::TypeId',
                   [param('std::string', 'groupName')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
    cls.add_method('SetParent',
                   'ns3::TypeId',
                   [param('ns3::TypeId', 'tid')])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent() [member function]
    # Zero-argument template overload, instantiated for ns3::Object.
    cls.add_method('SetParent',
                   'ns3::TypeId',
                   [],
                   template_parameters=['ns3::Object'])
    ## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
    cls.add_method('SetSize',
                   'ns3::TypeId',
                   [param('std::size_t', 'size')])
    ## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t uid) [member function]
    cls.add_method('SetUid',
                   'void',
                   [param('uint16_t', 'uid')])
    return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
    """Register the ns3::TypeId::AttributeInformation struct's members on the
    PyBindGen wrapper *cls* (constructors and public instance attributes).
    Auto-generated; each ``##`` comment records the originating C++ declaration.
    """
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [constructor]
    cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
    cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
    cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
    cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
    cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportLevel [variable]
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::supportMsg [variable]
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
    """Register the ns3::TypeId::TraceSourceInformation struct's members on the
    PyBindGen wrapper *cls* (constructors and public instance attributes).
    """
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
    cls.add_constructor([])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [constructor]
    cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
    cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
    cls.add_instance_attribute('callback', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
    cls.add_instance_attribute('help', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
    cls.add_instance_attribute('name', 'std::string', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportLevel [variable]
    cls.add_instance_attribute('supportLevel', 'ns3::TypeId::SupportLevel', is_const=False)
    ## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::supportMsg [variable]
    cls.add_instance_attribute('supportMsg', 'std::string', is_const=False)
    return
def register_Ns3Empty_methods(root_module, cls):
    """Register ns3::empty (an empty tag type) on the PyBindGen wrapper *cls*."""
    ## empty.h (module 'core'): ns3::empty::empty() [constructor]
    cls.add_constructor([])
    ## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [constructor]
    cls.add_constructor([param('ns3::empty const &', 'arg0')])
    return
def register_Ns3Int64x64_t_methods(root_module, cls):
    """Register ns3::int64x64_t (64.64 fixed-point) on the PyBindGen wrapper *cls*.

    Adds arithmetic/comparison operators (some resolve result types through
    *root_module*, e.g. int64x64_t * Time -> Time), the numeric-type
    constructors, and the accessor/utility member functions.
    """
    cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::int64x64_t'], param('ns3::Time const &', 'right'))
    cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_comparison_operator('!=')
    cls.add_binary_comparison_operator('<=')
    cls.add_binary_comparison_operator('>=')
    cls.add_output_stream_operator()
    cls.add_unary_numeric_operator('-')
    cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', 'right'))
    cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', 'right'))
    cls.add_binary_comparison_operator('>')
    cls.add_binary_comparison_operator('<')
    cls.add_binary_comparison_operator('==')
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
    cls.add_constructor([])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(double const value) [constructor]
    cls.add_constructor([param('double const', 'value')])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long double const value) [constructor]
    cls.add_constructor([param('long double const', 'value')])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int const v) [constructor]
    cls.add_constructor([param('int const', 'v')])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long int const v) [constructor]
    cls.add_constructor([param('long int const', 'v')])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int const v) [constructor]
    cls.add_constructor([param('long long int const', 'v')])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int const v) [constructor]
    cls.add_constructor([param('unsigned int const', 'v')])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int const v) [constructor]
    cls.add_constructor([param('long unsigned int const', 'v')])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int const v) [constructor]
    cls.add_constructor([param('long long unsigned int const', 'v')])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int128_t const v) [constructor]
    cls.add_constructor([param('int128_t const', 'v')])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t const hi, uint64_t const lo) [constructor]
    cls.add_constructor([param('int64_t const', 'hi'), param('uint64_t const', 'lo')])
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [constructor]
    cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
    ## int64x64-128.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
    cls.add_method('GetDouble',
                   'double',
                   [],
                   is_const=True)
    ## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
    cls.add_method('GetHigh',
                   'int64_t',
                   [],
                   is_const=True)
    ## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::GetInt() const [member function]
    cls.add_method('GetInt',
                   'int64_t',
                   [],
                   is_const=True)
    ## int64x64-128.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
    cls.add_method('GetLow',
                   'uint64_t',
                   [],
                   is_const=True)
    ## int64x64-128.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t const v) [member function]
    cls.add_method('Invert',
                   'ns3::int64x64_t',
                   [param('uint64_t const', 'v')],
                   is_static=True)
    ## int64x64-128.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
    cls.add_method('MulByInvert',
                   'void',
                   [param('ns3::int64x64_t const &', 'o')])
    ## int64x64-128.h (module 'core'): int64_t ns3::int64x64_t::Round() const [member function]
    cls.add_method('Round',
                   'int64_t',
                   [],
                   is_const=True)
    ## int64x64-128.h (module 'core'): ns3::int64x64_t::implementation [variable]
    cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
    return
def register_Ns3Gnuplot2dDataset_methods(root_module, cls):
    """Register ns3::Gnuplot2dDataset on the PyBindGen wrapper *cls*:
    constructors, the Add() overloads (with optional error deltas), and
    the style/error-bar configuration methods.
    """
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset::Gnuplot2dDataset(ns3::Gnuplot2dDataset const & arg0) [constructor]
    cls.add_constructor([param('ns3::Gnuplot2dDataset const &', 'arg0')])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dDataset::Gnuplot2dDataset(std::string const & title="Untitled") [constructor]
    cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot2dDataset::Add(double x, double y) [member function]
    cls.add_method('Add',
                   'void',
                   [param('double', 'x'), param('double', 'y')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot2dDataset::Add(double x, double y, double errorDelta) [member function]
    cls.add_method('Add',
                   'void',
                   [param('double', 'x'), param('double', 'y'), param('double', 'errorDelta')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot2dDataset::Add(double x, double y, double xErrorDelta, double yErrorDelta) [member function]
    cls.add_method('Add',
                   'void',
                   [param('double', 'x'), param('double', 'y'), param('double', 'xErrorDelta'), param('double', 'yErrorDelta')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot2dDataset::AddEmptyLine() [member function]
    cls.add_method('AddEmptyLine',
                   'void',
                   [])
    ## gnuplot.h (module 'stats'): static void ns3::Gnuplot2dDataset::SetDefaultErrorBars(ns3::Gnuplot2dDataset::ErrorBars errorBars) [member function]
    cls.add_method('SetDefaultErrorBars',
                   'void',
                   [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')],
                   is_static=True)
    ## gnuplot.h (module 'stats'): static void ns3::Gnuplot2dDataset::SetDefaultStyle(ns3::Gnuplot2dDataset::Style style) [member function]
    cls.add_method('SetDefaultStyle',
                   'void',
                   [param('ns3::Gnuplot2dDataset::Style', 'style')],
                   is_static=True)
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot2dDataset::SetErrorBars(ns3::Gnuplot2dDataset::ErrorBars errorBars) [member function]
    cls.add_method('SetErrorBars',
                   'void',
                   [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot2dDataset::SetStyle(ns3::Gnuplot2dDataset::Style style) [member function]
    cls.add_method('SetStyle',
                   'void',
                   [param('ns3::Gnuplot2dDataset::Style', 'style')])
    return
def register_Ns3Gnuplot2dFunction_methods(root_module, cls):
    """Register ns3::Gnuplot2dFunction on the PyBindGen wrapper *cls*."""
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dFunction::Gnuplot2dFunction(ns3::Gnuplot2dFunction const & arg0) [constructor]
    cls.add_constructor([param('ns3::Gnuplot2dFunction const &', 'arg0')])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot2dFunction::Gnuplot2dFunction(std::string const & title="Untitled", std::string const & function="") [constructor]
    cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"'), param('std::string const &', 'function', default_value='""')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot2dFunction::SetFunction(std::string const & function) [member function]
    cls.add_method('SetFunction',
                   'void',
                   [param('std::string const &', 'function')])
    return
def register_Ns3Gnuplot3dDataset_methods(root_module, cls):
    """Register ns3::Gnuplot3dDataset on the PyBindGen wrapper *cls*:
    constructors, Add(x, y, z), AddEmptyLine(), and style setters.
    """
    ## gnuplot.h (module 'stats'): ns3::Gnuplot3dDataset::Gnuplot3dDataset(ns3::Gnuplot3dDataset const & arg0) [constructor]
    cls.add_constructor([param('ns3::Gnuplot3dDataset const &', 'arg0')])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot3dDataset::Gnuplot3dDataset(std::string const & title="Untitled") [constructor]
    cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot3dDataset::Add(double x, double y, double z) [member function]
    cls.add_method('Add',
                   'void',
                   [param('double', 'x'), param('double', 'y'), param('double', 'z')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot3dDataset::AddEmptyLine() [member function]
    cls.add_method('AddEmptyLine',
                   'void',
                   [])
    ## gnuplot.h (module 'stats'): static void ns3::Gnuplot3dDataset::SetDefaultStyle(std::string const & style) [member function]
    cls.add_method('SetDefaultStyle',
                   'void',
                   [param('std::string const &', 'style')],
                   is_static=True)
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot3dDataset::SetStyle(std::string const & style) [member function]
    cls.add_method('SetStyle',
                   'void',
                   [param('std::string const &', 'style')])
    return
def register_Ns3Gnuplot3dFunction_methods(root_module, cls):
    """Register ns3::Gnuplot3dFunction on the PyBindGen wrapper *cls*."""
    ## gnuplot.h (module 'stats'): ns3::Gnuplot3dFunction::Gnuplot3dFunction(ns3::Gnuplot3dFunction const & arg0) [constructor]
    cls.add_constructor([param('ns3::Gnuplot3dFunction const &', 'arg0')])
    ## gnuplot.h (module 'stats'): ns3::Gnuplot3dFunction::Gnuplot3dFunction(std::string const & title="Untitled", std::string const & function="") [constructor]
    cls.add_constructor([param('std::string const &', 'title', default_value='"Untitled"'), param('std::string const &', 'function', default_value='""')])
    ## gnuplot.h (module 'stats'): void ns3::Gnuplot3dFunction::SetFunction(std::string const & function) [member function]
    cls.add_method('SetFunction',
                   'void',
                   [param('std::string const &', 'function')])
    return
def register_Ns3Object_methods(root_module, cls):
    """Register ns3::Object on the PyBindGen wrapper *cls*.

    Covers the public lifecycle/aggregation API (AggregateObject, Dispose,
    Initialize, GetObject with template parameters) plus the protected
    copy constructor and virtual hooks (DoDispose, DoInitialize,
    NotifyNewAggregate) so Python subclasses can override them.
    """
    ## object.h (module 'core'): ns3::Object::Object() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
    cls.add_method('AggregateObject',
                   'void',
                   [param('ns3::Ptr< ns3::Object >', 'other')])
    ## object.h (module 'core'): void ns3::Object::Dispose() [member function]
    cls.add_method('Dispose',
                   'void',
                   [])
    ## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
    cls.add_method('GetAggregateIterator',
                   'ns3::Object::AggregateIterator',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
    cls.add_method('GetInstanceTypeId',
                   'ns3::TypeId',
                   [],
                   is_const=True, is_virtual=True)
    ## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject() const [member function]
    cls.add_method('GetObject',
                   'ns3::Ptr< ns3::Object >',
                   [],
                   custom_template_method_name='GetObject', is_const=True, template_parameters=['ns3::Object'])
    ## object.h (module 'core'): ns3::Ptr<ns3::Object> ns3::Object::GetObject(ns3::TypeId tid) const [member function]
    cls.add_method('GetObject',
                   'ns3::Ptr< ns3::Object >',
                   [param('ns3::TypeId', 'tid')],
                   custom_template_method_name='GetObject', is_const=True, template_parameters=['ns3::Object'])
    ## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## object.h (module 'core'): void ns3::Object::Initialize() [member function]
    cls.add_method('Initialize',
                   'void',
                   [])
    ## object.h (module 'core'): bool ns3::Object::IsInitialized() const [member function]
    cls.add_method('IsInitialized',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [constructor]
    cls.add_constructor([param('ns3::Object const &', 'o')],
                        visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
    cls.add_method('DoDispose',
                   'void',
                   [],
                   is_virtual=True, visibility='protected')
    ## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
    cls.add_method('DoInitialize',
                   'void',
                   [],
                   is_virtual=True, visibility='protected')
    ## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
    cls.add_method('NotifyNewAggregate',
                   'void',
                   [],
                   is_virtual=True, visibility='protected')
    return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
    """Register ns3::Object::AggregateIterator on the PyBindGen wrapper *cls*."""
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [constructor]
    cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
    ## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
    cls.add_constructor([])
    ## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
    cls.add_method('HasNext',
                   'bool',
                   [],
                   is_const=True)
    ## object.h (module 'core'): ns3::Ptr<const ns3::Object> ns3::Object::AggregateIterator::Next() [member function]
    cls.add_method('Next',
                   'ns3::Ptr< ns3::Object const >',
                   [])
    return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeAccessor, ...> instantiation on *cls*."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeChecker, ...> instantiation on *cls*."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
    """Register the SimpleRefCount<AttributeValue, ...> instantiation on *cls*."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
    """Register the SimpleRefCount<CallbackImplBase, ...> instantiation on *cls*."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
    """Register the SimpleRefCount<EventImpl, ...> instantiation on *cls*."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
    """Register the SimpleRefCount<Hash::Implementation, ...> instantiation on *cls*."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3SQLiteOutput_Ns3Empty_Ns3DefaultDeleter__lt__ns3SQLiteOutput__gt___methods(root_module, cls):
    """Register the SimpleRefCount<SQLiteOutput, ...> instantiation on *cls*."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SQLiteOutput, ns3::empty, ns3::DefaultDeleter<ns3::SQLiteOutput> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::SQLiteOutput, ns3::empty, ns3::DefaultDeleter<ns3::SQLiteOutput> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SQLiteOutput, ns3::empty, ns3::DefaultDeleter<ns3::SQLiteOutput> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::SQLiteOutput, ns3::empty, ns3::DefaultDeleter< ns3::SQLiteOutput > > const &', 'o')])
    return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
    """Register the SimpleRefCount<TraceSourceAccessor, ...> instantiation on *cls*."""
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
    cls.add_constructor([])
    ## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [constructor]
    cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
    return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
    """Register the abstract ns3::TraceSourceAccessor interface on *cls*:
    the four pure-virtual (dis)connect methods, with and without context.
    """
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [constructor]
    cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
    ## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
    cls.add_constructor([])
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Connect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('ConnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('Disconnect',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
    cls.add_method('DisconnectWithoutContext',
                   'bool',
                   [param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3AttributeAccessor_methods(root_module, cls):
    """Register the abstract ns3::AttributeAccessor interface on *cls*
    (pure-virtual Get/Set/HasGetter/HasSetter).
    """
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
    cls.add_method('Get',
                   'bool',
                   [param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
    cls.add_method('HasGetter',
                   'bool',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
    cls.add_method('HasSetter',
                   'bool',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
    cls.add_method('Set',
                   'bool',
                   [param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3AttributeChecker_methods(root_module, cls):
    """Register the abstract ns3::AttributeChecker interface on *cls*
    (pure-virtual Check/Copy/Create and type-information queries, plus the
    concrete CreateValidValue helper).
    """
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
    cls.add_method('Check',
                   'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
    cls.add_method('Copy',
                   'bool',
                   [param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
    cls.add_method('Create',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
    cls.add_method('CreateValidValue',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
    cls.add_method('GetUnderlyingTypeInformation',
                   'std::string',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
    cls.add_method('GetValueTypeName',
                   'std::string',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
    cls.add_method('HasUnderlyingTypeInformation',
                   'bool',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3AttributeValue_methods(root_module, cls):
    """Register the abstract ns3::AttributeValue interface on *cls*
    (pure-virtual Copy and string (de)serialization).
    """
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [constructor]
    cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
    ## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
    cls.add_constructor([])
    ## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_pure_virtual=True, is_virtual=True)
    ## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3BooleanChecker_methods(root_module, cls):
    """Register ns3::BooleanChecker (constructors only) on *cls*."""
    ## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker() [constructor]
    cls.add_constructor([])
    ## boolean.h (module 'core'): ns3::BooleanChecker::BooleanChecker(ns3::BooleanChecker const & arg0) [constructor]
    cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
    return
def register_Ns3BooleanValue_methods(root_module, cls):
    """Register ns3::BooleanValue on *cls*: constructors, Get/Set, and the
    AttributeValue overrides (Copy, SerializeToString, DeserializeFromString).
    """
    cls.add_output_stream_operator()
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(ns3::BooleanValue const & arg0) [constructor]
    cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue() [constructor]
    cls.add_constructor([])
    ## boolean.h (module 'core'): ns3::BooleanValue::BooleanValue(bool value) [constructor]
    cls.add_constructor([param('bool', 'value')])
    ## boolean.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::BooleanValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## boolean.h (module 'core'): bool ns3::BooleanValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## boolean.h (module 'core'): bool ns3::BooleanValue::Get() const [member function]
    cls.add_method('Get',
                   'bool',
                   [],
                   is_const=True)
    ## boolean.h (module 'core'): std::string ns3::BooleanValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## boolean.h (module 'core'): void ns3::BooleanValue::Set(bool value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('bool', 'value')])
    return
def register_Ns3CallbackChecker_methods(root_module, cls):
    """Register ns3::CallbackChecker (constructors only) on *cls*."""
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
    return
def register_Ns3CallbackImplBase_methods(root_module, cls):
    """Register the Python bindings for ns3::CallbackImplBase (callback.h, module 'core')."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
    # Pure-virtual public interface.
    cls.add_method('GetTypeid', 'std::string', [],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    cls.add_method('IsEqual', 'bool',
                   [param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    # Protected static helper.
    cls.add_method('Demangle', 'std::string',
                   [param('std::string const &', 'mangled')],
                   is_static=True, visibility='protected')
    # GetCppTypeid is registered once per template instantiation used by ns-3.
    for template_arg in ('ns3::ObjectBase*', 'void', 'bool', 'double',
                         'unsigned short', 'unsigned int', 'unsigned char'):
        cls.add_method('GetCppTypeid', 'std::string', [],
                       is_static=True, template_parameters=[template_arg],
                       visibility='protected')
    return
def register_Ns3CallbackValue_methods(root_module, cls):
    """Register the Python bindings for ns3::CallbackValue (callback.h, module 'core')."""
    checker_ptr = 'ns3::Ptr< ns3::AttributeChecker const >'
    # Constructors: copy, default, and from a CallbackBase.
    cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
    # Virtual AttributeValue interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param(checker_ptr, 'checker')],
                   is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param(checker_ptr, 'checker')],
                   is_const=True, is_virtual=True)
    # Replace the stored callback.
    cls.add_method('Set', 'void', [param('ns3::CallbackBase', 'base')])
    return
def register_Ns3DataCalculator_methods(root_module, cls):
    """Register the Python bindings for ns3::DataCalculator (data-calculator.h, module 'stats')."""
    cls.add_constructor([param('ns3::DataCalculator const &', 'arg0')])
    cls.add_constructor([])
    # Enable/disable the calculator.
    cls.add_method('Disable', 'void', [])
    cls.add_method('Enable', 'void', [])
    # Const getters.
    cls.add_method('GetContext', 'std::string', [], is_const=True)
    cls.add_method('GetEnabled', 'bool', [], is_const=True)
    cls.add_method('GetKey', 'std::string', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Output is pure virtual in the C++ class.
    cls.add_method('Output', 'void',
                   [param('ns3::DataOutputCallback &', 'callback')],
                   is_const=True, is_pure_virtual=True, is_virtual=True)
    # Setters.
    cls.add_method('SetContext', 'void', [param('std::string const', 'context')])
    cls.add_method('SetKey', 'void', [param('std::string const', 'key')])
    # Virtual start/stop hooks taking simulation times.
    cls.add_method('Start', 'void', [param('ns3::Time const &', 'startTime')],
                   is_virtual=True)
    cls.add_method('Stop', 'void', [param('ns3::Time const &', 'stopTime')],
                   is_virtual=True)
    cls.add_method('DoDispose', 'void', [],
                   is_virtual=True, visibility='protected')
    return
def register_Ns3DataCollectionObject_methods(root_module, cls):
    """Register the Python bindings for ns3::DataCollectionObject (data-collection-object.h, module 'stats')."""
    cls.add_constructor([param('ns3::DataCollectionObject const &', 'arg0')])
    cls.add_constructor([])
    # Enable/disable the object.
    cls.add_method('Disable', 'void', [])
    cls.add_method('Enable', 'void', [])
    # Name accessors and type metadata.
    cls.add_method('GetName', 'std::string', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsEnabled', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('SetName', 'void', [param('std::string', 'name')])
    return
def register_Ns3DataCollector_methods(root_module, cls):
    """Register the Python bindings for ns3::DataCollector (data-collector.h, module 'stats')."""
    calc_iter = 'std::list< ns3::Ptr< ns3::DataCalculator > > iterator'
    meta_iter = 'std::list< std::pair< std::string, std::string > > iterator'
    cls.add_constructor([param('ns3::DataCollector const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('AddDataCalculator', 'void',
                   [param('ns3::Ptr< ns3::DataCalculator >', 'datac')])
    # AddMetadata is overloaded on the value type.
    for value_type in ('std::string', 'double', 'uint32_t'):
        cls.add_method('AddMetadata', 'void',
                       [param('std::string', 'key'), param(value_type, 'value')])
    # Iterator access to the registered calculators.
    cls.add_method('DataCalculatorBegin', calc_iter, [])
    cls.add_method('DataCalculatorEnd', calc_iter, [])
    cls.add_method('DescribeRun', 'void',
                   [param('std::string', 'experiment'),
                    param('std::string', 'strategy'),
                    param('std::string', 'input'),
                    param('std::string', 'runID'),
                    param('std::string', 'description', default_value='""')])
    # Const string getters for the run description fields.
    for getter in ('GetDescription', 'GetExperimentLabel', 'GetInputLabel',
                   'GetRunLabel', 'GetStrategyLabel'):
        cls.add_method(getter, 'std::string', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Iterator access to the metadata key/value pairs.
    cls.add_method('MetadataBegin', meta_iter, [])
    cls.add_method('MetadataEnd', meta_iter, [])
    cls.add_method('DoDispose', 'void', [],
                   is_virtual=True, visibility='protected')
    return
def register_Ns3DataOutputInterface_methods(root_module, cls):
    """Register the Python bindings for ns3::DataOutputInterface (data-output-interface.h, module 'stats')."""
    cls.add_constructor([param('ns3::DataOutputInterface const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetFilePrefix', 'std::string', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Output is pure virtual in the C++ class.
    cls.add_method('Output', 'void', [param('ns3::DataCollector &', 'dc')],
                   is_pure_virtual=True, is_virtual=True)
    cls.add_method('SetFilePrefix', 'void',
                   [param('std::string const', 'prefix')])
    cls.add_method('DoDispose', 'void', [],
                   is_virtual=True, visibility='protected')
    return
def register_Ns3DoubleValue_methods(root_module, cls):
    """Register the Python bindings for ns3::DoubleValue (double.h, module 'core')."""
    checker_ptr = 'ns3::Ptr< ns3::AttributeChecker const >'
    # Constructors: default, from double, and copy.
    cls.add_constructor([])
    cls.add_constructor([param('double const &', 'value')])
    cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
    # Virtual AttributeValue interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param(checker_ptr, 'checker')],
                   is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param(checker_ptr, 'checker')],
                   is_const=True, is_virtual=True)
    # Accessors for the wrapped double.
    cls.add_method('Get', 'double', [], is_const=True)
    cls.add_method('Set', 'void', [param('double const &', 'value')])
    return
def register_Ns3EmptyAttributeAccessor_methods(root_module, cls):
    """Register the Python bindings for ns3::EmptyAttributeAccessor (attribute.h, module 'core')."""
    cls.add_constructor([param('ns3::EmptyAttributeAccessor const &', 'arg0')])
    cls.add_constructor([])
    # Virtual AttributeAccessor interface.
    cls.add_method('Get', 'bool',
                   [param('ns3::ObjectBase const *', 'object'),
                    param('ns3::AttributeValue &', 'attribute')],
                   is_const=True, is_virtual=True)
    cls.add_method('HasGetter', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('HasSetter', 'bool', [], is_const=True, is_virtual=True)
    cls.add_method('Set', 'bool',
                   [param('ns3::ObjectBase *', 'object'),
                    param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EmptyAttributeChecker_methods(root_module, cls):
    """Register the Python bindings for ns3::EmptyAttributeChecker (attribute.h, module 'core')."""
    cls.add_constructor([param('ns3::EmptyAttributeChecker const &', 'arg0')])
    cls.add_constructor([])
    # Virtual AttributeChecker interface.
    cls.add_method('Check', 'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'source'),
                    param('ns3::AttributeValue &', 'destination')],
                   is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetValueTypeName', 'std::string', [],
                   is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
    """Register the Python bindings for ns3::EmptyAttributeValue (attribute.h, module 'core')."""
    checker_ptr = 'ns3::Ptr< ns3::AttributeChecker const >'
    cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
    cls.add_constructor([])
    # The AttributeValue interface is registered with private visibility,
    # matching the C++ declaration.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True, visibility='private')
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param(checker_ptr, 'checker')],
                   is_virtual=True, visibility='private')
    cls.add_method('SerializeToString', 'std::string',
                   [param(checker_ptr, 'checker')],
                   is_const=True, is_virtual=True, visibility='private')
    return
def register_Ns3EnumChecker_methods(root_module, cls):
    """Register the Python bindings for ns3::EnumChecker (enum.h, module 'core')."""
    cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
    cls.add_constructor([])
    # Populate the value <-> name mapping.
    cls.add_method('Add', 'void',
                   [param('int', 'value'), param('std::string', 'name')])
    cls.add_method('AddDefault', 'void',
                   [param('int', 'value'), param('std::string', 'name')])
    # Virtual AttributeChecker interface.
    cls.add_method('Check', 'bool',
                   [param('ns3::AttributeValue const &', 'value')],
                   is_const=True, is_virtual=True)
    cls.add_method('Copy', 'bool',
                   [param('ns3::AttributeValue const &', 'src'),
                    param('ns3::AttributeValue &', 'dst')],
                   is_const=True, is_virtual=True)
    cls.add_method('Create', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    # Lookups between an enum value and its registered name.
    cls.add_method('GetName', 'std::string', [param('int', 'value')],
                   is_const=True)
    cls.add_method('GetUnderlyingTypeInformation', 'std::string', [],
                   is_const=True, is_virtual=True)
    cls.add_method('GetValue', 'int', [param('std::string const', 'name')],
                   is_const=True)
    cls.add_method('GetValueTypeName', 'std::string', [],
                   is_const=True, is_virtual=True)
    cls.add_method('HasUnderlyingTypeInformation', 'bool', [],
                   is_const=True, is_virtual=True)
    return
def register_Ns3EnumValue_methods(root_module, cls):
    """Register the Python bindings for ns3::EnumValue (enum.h, module 'core')."""
    checker_ptr = 'ns3::Ptr< ns3::AttributeChecker const >'
    # Constructors: copy, default, and from a plain int.
    cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
    cls.add_constructor([])
    cls.add_constructor([param('int', 'value')])
    # Virtual AttributeValue interface.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [],
                   is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool',
                   [param('std::string', 'value'), param(checker_ptr, 'checker')],
                   is_virtual=True)
    cls.add_method('SerializeToString', 'std::string',
                   [param(checker_ptr, 'checker')],
                   is_const=True, is_virtual=True)
    # Accessors for the wrapped enum value (exposed as int).
    cls.add_method('Get', 'int', [], is_const=True)
    cls.add_method('Set', 'void', [param('int', 'value')])
    return
def register_Ns3EventImpl_methods(root_module, cls):
    """Register the Python bindings for ns3::EventImpl (event-impl.h, module 'core')."""
    cls.add_constructor([param('ns3::EventImpl const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('Cancel', 'void', [])
    cls.add_method('Invoke', 'void', [])
    cls.add_method('IsCancelled', 'bool', [])
    # Notify is pure virtual and protected in the C++ class.
    cls.add_method('Notify', 'void', [],
                   is_pure_virtual=True, is_virtual=True,
                   visibility='protected')
    return
def register_Ns3FileAggregator_methods(root_module, cls):
    """Register the Python bindings for ns3::FileAggregator (file-aggregator.h, module 'stats')."""
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Constructor: output file name plus file type (space-separated by default).
    cls.add_constructor([param('std::string const &', 'outputFileName'),
                         param('ns3::FileAggregator::FileType', 'fileType',
                               default_value='::ns3::FileAggregator::FileType::SPACE_SEPARATED')])
    cls.add_method('SetFileType', 'void',
                   [param('ns3::FileAggregator::FileType', 'fileType')])
    cls.add_method('SetHeading', 'void',
                   [param('std::string const &', 'heading')])
    # Set1dFormat .. Set10dFormat: each takes a single format string.
    for n in range(1, 11):
        cls.add_method('Set%ddFormat' % n, 'void',
                       [param('std::string const &', 'format')])
    # Write1d .. Write10d: a context string followed by N double values.
    for n in range(1, 11):
        values = [param('double', 'v%d' % i) for i in range(1, n + 1)]
        cls.add_method('Write%dd' % n, 'void',
                       [param('std::string', 'context')] + values)
    return
def register_Ns3GnuplotAggregator_methods(root_module, cls):
    """Register ns3::GnuplotAggregator (gnuplot-aggregator.h, module 'stats') on `cls`."""
    # Constructors: copy, and output-file-name-without-extension.
    cls.add_constructor([param('ns3::GnuplotAggregator const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'outputFileNameWithoutExtension')])
    # Dataset creation and extras.
    cls.add_method('Add2dDataset', 'void', [param('std::string const &', 'dataset'), param('std::string const &', 'title')])
    cls.add_method('AppendExtra', 'void', [param('std::string const &', 'extra')])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Static dataset defaults.
    cls.add_method('Set2dDatasetDefaultErrorBars', 'void', [param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')], is_static=True)
    cls.add_method('Set2dDatasetDefaultExtra', 'void', [param('std::string const &', 'extra')], is_static=True)
    cls.add_method('Set2dDatasetDefaultStyle', 'void', [param('ns3::Gnuplot2dDataset::Style', 'style')], is_static=True)
    # Per-dataset configuration.
    cls.add_method('Set2dDatasetErrorBars', 'void', [param('std::string const &', 'dataset'), param('ns3::Gnuplot2dDataset::ErrorBars', 'errorBars')])
    cls.add_method('Set2dDatasetExtra', 'void', [param('std::string const &', 'dataset'), param('std::string const &', 'extra')])
    cls.add_method('Set2dDatasetStyle', 'void', [param('std::string const &', 'dataset'), param('ns3::Gnuplot2dDataset::Style', 'style')])
    # Plot-level settings.
    cls.add_method('SetExtra', 'void', [param('std::string const &', 'extra')])
    cls.add_method('SetKeyLocation', 'void', [param('ns3::GnuplotAggregator::KeyLocation', 'keyLocation')])
    cls.add_method('SetLegend', 'void', [param('std::string const &', 'xLegend'), param('std::string const &', 'yLegend')])
    cls.add_method('SetTerminal', 'void', [param('std::string const &', 'terminal')])
    cls.add_method('SetTitle', 'void', [param('std::string const &', 'title')])
    # Write2d* sinks taking (context, x, y) plus optional error deltas.
    cls.add_method('Write2d', 'void', [param('std::string', 'context'), param('double', 'x'), param('double', 'y')])
    cls.add_method('Write2dDatasetEmptyLine', 'void', [param('std::string const &', 'dataset')])
    cls.add_method('Write2dWithXErrorDelta', 'void', [param('std::string', 'context'), param('double', 'x'), param('double', 'y'), param('double', 'errorDelta')])
    cls.add_method('Write2dWithXYErrorDelta', 'void', [param('std::string', 'context'), param('double', 'x'), param('double', 'y'), param('double', 'xErrorDelta'), param('double', 'yErrorDelta')])
    cls.add_method('Write2dWithYErrorDelta', 'void', [param('std::string', 'context'), param('double', 'x'), param('double', 'y'), param('double', 'errorDelta')])
    return
def register_Ns3IntegerValue_methods(root_module, cls):
    """Register ns3::IntegerValue (integer.h, module 'core') on `cls`."""
    # Constructors: default, from int64_t, and copy.
    cls.add_constructor([])
    cls.add_constructor([param('int64_t const &', 'value')])
    cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
    # AttributeValue interface plus typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'int64_t', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('int64_t const &', 'value')])
    return
def register_Ns3MinMaxAvgTotalCalculator__Double_methods(root_module, cls):
    """Register ns3::MinMaxAvgTotalCalculator<double> (basic-data-calculators.h, module 'stats') on `cls`."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::MinMaxAvgTotalCalculator< double > const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Output', 'void', [param('ns3::DataOutputCallback &', 'callback')], is_const=True, is_virtual=True)
    cls.add_method('Reset', 'void', [])
    cls.add_method('Update', 'void', [param('double const', 'i')])
    # Statistic accessors (const virtual getters).
    cls.add_method('getCount', 'long int', [], is_const=True, is_virtual=True)
    cls.add_method('getMax', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getMean', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getMin', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getSqrSum', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getStddev', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getSum', 'double', [], is_const=True, is_virtual=True)
    cls.add_method('getVariance', 'double', [], is_const=True, is_virtual=True)
    # Protected disposal hook.
    cls.add_method('DoDispose', 'void', [], is_virtual=True, visibility='protected')
    return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
    """Register ns3::ObjectFactoryChecker (object-factory.h, module 'core') on `cls`."""
    # Default and copy constructors only.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
    return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
    """Register ns3::ObjectFactoryValue (object-factory.h, module 'core') on `cls`."""
    # Constructors: default, from ObjectFactory, and copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
    cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
    # AttributeValue interface plus typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::ObjectFactory', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::ObjectFactory const &', 'value')])
    return
def register_Ns3OmnetDataOutput_methods(root_module, cls):
    """Register ns3::OmnetDataOutput (omnet-data-output.h, module 'stats') on `cls`."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::OmnetDataOutput const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Output', 'void', [param('ns3::DataCollector &', 'dc')], is_virtual=True)
    # Protected disposal hook.
    cls.add_method('DoDispose', 'void', [], is_virtual=True, visibility='protected')
    return
def register_Ns3Probe_methods(root_module, cls):
    """Register the abstract base ns3::Probe (probe.h, module 'stats') on `cls`."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::Probe const &', 'arg0')])
    cls.add_constructor([])
    # Pure-virtual connection points implemented by concrete probes.
    cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('ConnectByPath', 'void', [param('std::string', 'path')], is_pure_virtual=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('IsEnabled', 'bool', [], is_const=True, is_virtual=True)
    return
def register_Ns3SQLiteOutput_methods(root_module, cls):
    """Register ns3::SQLiteOutput (sqlite-output.h, module 'stats') on `cls`.

    Public Spin*/Wait* exec and prepare overloads take either an SQL string or a
    prepared sqlite3_stmt; the protected overloads additionally take an explicit
    sqlite3 database handle.
    """
    # Constructors: copy, and (database name, semaphore name).
    cls.add_constructor([param('ns3::SQLiteOutput const &', 'arg0')])
    cls.add_constructor([param('std::string const &', 'name'), param('std::string const &', 'semName')])
    cls.add_method('SetJournalInMemory', 'void', [])
    # Public Spin* variants.
    cls.add_method('SpinExec', 'bool', [param('std::string const &', 'cmd')], is_const=True)
    cls.add_method('SpinExec', 'bool', [param('sqlite3_stmt *', 'stmt')], is_const=True)
    cls.add_method('SpinFinalize', 'int', [param('sqlite3_stmt *', 'stmt')], is_static=True)
    cls.add_method('SpinPrepare', 'bool', [param('sqlite3_stmt * *', 'stmt'), param('std::string const &', 'cmd')], is_const=True)
    cls.add_method('SpinReset', 'int', [param('sqlite3_stmt *', 'stmt')], is_static=True)
    cls.add_method('SpinStep', 'int', [param('sqlite3_stmt *', 'stmt')], is_static=True)
    # Public Wait* variants.
    cls.add_method('WaitExec', 'bool', [param('std::string const &', 'cmd')], is_const=True)
    cls.add_method('WaitExec', 'bool', [param('sqlite3_stmt *', 'stmt')], is_const=True)
    cls.add_method('WaitPrepare', 'bool', [param('sqlite3_stmt * *', 'stmt'), param('std::string const &', 'cmd')], is_const=True)
    # Protected helpers operating on an explicit sqlite3 handle.
    cls.add_method('CheckError', 'bool', [param('sqlite3 *', 'db'), param('int', 'rc'), param('std::string const &', 'cmd'), param('sem_t *', 'sem'), param('bool', 'hardExit')], is_static=True, visibility='protected')
    cls.add_method('Error', 'void', [param('sqlite3 *', 'db'), param('std::string const &', 'cmd')], is_static=True, visibility='protected')
    cls.add_method('SpinExec', 'int', [param('sqlite3 *', 'db'), param('std::string const &', 'cmd')], is_static=True, visibility='protected')
    cls.add_method('SpinExec', 'int', [param('sqlite3 *', 'db'), param('sqlite3_stmt *', 'stmt')], is_static=True, visibility='protected')
    cls.add_method('SpinPrepare', 'int', [param('sqlite3 *', 'db'), param('sqlite3_stmt * *', 'stmt'), param('std::string const &', 'cmd')], is_static=True, visibility='protected')
    cls.add_method('WaitExec', 'int', [param('sqlite3 *', 'db'), param('std::string const &', 'cmd')], is_const=True, visibility='protected')
    cls.add_method('WaitExec', 'int', [param('sqlite3 *', 'db'), param('sqlite3_stmt *', 'stmt')], is_const=True, visibility='protected')
    cls.add_method('WaitPrepare', 'int', [param('sqlite3 *', 'db'), param('sqlite3_stmt * *', 'stmt'), param('std::string const &', 'cmd')], is_const=True, visibility='protected')
    return
def register_Ns3SqliteDataOutput_methods(root_module, cls):
    """Register ns3::SqliteDataOutput (sqlite-data-output.h, module 'stats') on `cls`."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::SqliteDataOutput const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Output', 'void', [param('ns3::DataCollector &', 'dc')], is_virtual=True)
    return
def register_Ns3TimeMinMaxAvgTotalCalculator_methods(root_module, cls):
    """Register ns3::TimeMinMaxAvgTotalCalculator (time-data-calculators.h, module 'stats') on `cls`."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::TimeMinMaxAvgTotalCalculator const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Output', 'void', [param('ns3::DataOutputCallback &', 'callback')], is_const=True, is_virtual=True)
    cls.add_method('Update', 'void', [param('ns3::Time const', 'i')])
    # Protected disposal hook.
    cls.add_method('DoDispose', 'void', [], is_virtual=True, visibility='protected')
    return
def register_Ns3TimeProbe_methods(root_module, cls):
    """Register ns3::TimeProbe (time-probe.h, module 'stats') on `cls`."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::TimeProbe const &', 'arg0')])
    cls.add_constructor([])
    # Trace-source connection.
    cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_virtual=True)
    cls.add_method('ConnectByPath', 'void', [param('std::string', 'path')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # Value accessors; GetValue returns a double, SetValue takes an ns3::Time.
    cls.add_method('GetValue', 'double', [], is_const=True)
    cls.add_method('SetValue', 'void', [param('ns3::Time', 'value')])
    cls.add_method('SetValueByPath', 'void', [param('std::string', 'path'), param('ns3::Time', 'value')], is_static=True)
    return
def register_Ns3TimeSeriesAdaptor_methods(root_module, cls):
    """Register ns3::TimeSeriesAdaptor (time-series-adaptor.h, module 'stats') on `cls`."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::TimeSeriesAdaptor const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # TraceSink* overloads, one per traced value type; each takes (oldData, newData).
    cls.add_method('TraceSinkBoolean', 'void', [param('bool', 'oldData'), param('bool', 'newData')])
    cls.add_method('TraceSinkDouble', 'void', [param('double', 'oldData'), param('double', 'newData')])
    cls.add_method('TraceSinkUinteger16', 'void', [param('uint16_t', 'oldData'), param('uint16_t', 'newData')])
    cls.add_method('TraceSinkUinteger32', 'void', [param('uint32_t', 'oldData'), param('uint32_t', 'newData')])
    cls.add_method('TraceSinkUinteger8', 'void', [param('uint8_t', 'oldData'), param('uint8_t', 'newData')])
    return
def register_Ns3TimeValue_methods(root_module, cls):
    """Register ns3::TimeValue (nstime.h, module 'core') on `cls`."""
    # Constructors: default, from Time, and copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::Time const &', 'value')])
    cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
    # AttributeValue interface plus typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::Time', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::Time const &', 'value')])
    return
def register_Ns3TypeIdChecker_methods(root_module, cls):
    """Register ns3::TypeIdChecker (type-id.h, module 'core') on `cls`."""
    # Default and copy constructors only.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
    return
def register_Ns3TypeIdValue_methods(root_module, cls):
    """Register ns3::TypeIdValue (type-id.h, module 'core') on `cls`."""
    # Constructors: default, from TypeId, and copy.
    cls.add_constructor([])
    cls.add_constructor([param('ns3::TypeId const &', 'value')])
    cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
    # AttributeValue interface plus typed Get/Set accessors.
    cls.add_method('Copy', 'ns3::Ptr< ns3::AttributeValue >', [], is_const=True, is_virtual=True)
    cls.add_method('DeserializeFromString', 'bool', [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_virtual=True)
    cls.add_method('Get', 'ns3::TypeId', [], is_const=True)
    cls.add_method('SerializeToString', 'std::string', [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')], is_const=True, is_virtual=True)
    cls.add_method('Set', 'void', [param('ns3::TypeId const &', 'value')])
    return
def register_Ns3Uinteger16Probe_methods(root_module, cls):
    """Register ns3::Uinteger16Probe (uinteger-16-probe.h, module 'stats') on `cls`."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::Uinteger16Probe const &', 'arg0')])
    cls.add_constructor([])
    # Trace-source connection.
    cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_virtual=True)
    cls.add_method('ConnectByPath', 'void', [param('std::string', 'path')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # uint16_t value accessors.
    cls.add_method('GetValue', 'uint16_t', [], is_const=True)
    cls.add_method('SetValue', 'void', [param('uint16_t', 'value')])
    cls.add_method('SetValueByPath', 'void', [param('std::string', 'path'), param('uint16_t', 'value')], is_static=True)
    return
def register_Ns3Uinteger32Probe_methods(root_module, cls):
    """Register ns3::Uinteger32Probe (uinteger-32-probe.h, module 'stats') on `cls`."""
    # Constructors: copy and default.
    cls.add_constructor([param('ns3::Uinteger32Probe const &', 'arg0')])
    cls.add_constructor([])
    # Trace-source connection.
    cls.add_method('ConnectByObject', 'bool', [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')], is_virtual=True)
    cls.add_method('ConnectByPath', 'void', [param('std::string', 'path')], is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    # uint32_t value accessors.
    cls.add_method('GetValue', 'uint32_t', [], is_const=True)
    cls.add_method('SetValue', 'void', [param('uint32_t', 'value')])
    cls.add_method('SetValueByPath', 'void', [param('std::string', 'path'), param('uint32_t', 'value')], is_static=True)
    return
def register_Ns3Uinteger8Probe_methods(root_module, cls):
    """Register PyBindGen wrappers for ns3::Uinteger8Probe (module 'stats')."""
    ## uinteger-8-probe.h (module 'stats'): ns3::Uinteger8Probe::Uinteger8Probe(ns3::Uinteger8Probe const & arg0) [constructor]
    cls.add_constructor([param('ns3::Uinteger8Probe const &', 'arg0')])
    ## uinteger-8-probe.h (module 'stats'): ns3::Uinteger8Probe::Uinteger8Probe() [constructor]
    cls.add_constructor([])
    ## uinteger-8-probe.h (module 'stats'): bool ns3::Uinteger8Probe::ConnectByObject(std::string traceSource, ns3::Ptr<ns3::Object> obj) [member function]
    cls.add_method('ConnectByObject',
                   'bool',
                   [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')],
                   is_virtual=True)
    ## uinteger-8-probe.h (module 'stats'): void ns3::Uinteger8Probe::ConnectByPath(std::string path) [member function]
    cls.add_method('ConnectByPath',
                   'void',
                   [param('std::string', 'path')],
                   is_virtual=True)
    ## uinteger-8-probe.h (module 'stats'): static ns3::TypeId ns3::Uinteger8Probe::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## uinteger-8-probe.h (module 'stats'): uint8_t ns3::Uinteger8Probe::GetValue() const [member function]
    cls.add_method('GetValue',
                   'uint8_t',
                   [],
                   is_const=True)
    ## uinteger-8-probe.h (module 'stats'): void ns3::Uinteger8Probe::SetValue(uint8_t value) [member function]
    cls.add_method('SetValue',
                   'void',
                   [param('uint8_t', 'value')])
    ## uinteger-8-probe.h (module 'stats'): static void ns3::Uinteger8Probe::SetValueByPath(std::string path, uint8_t value) [member function]
    cls.add_method('SetValueByPath',
                   'void',
                   [param('std::string', 'path'), param('uint8_t', 'value')],
                   is_static=True)
    return
def register_Ns3UintegerValue_methods(root_module, cls):
    """Register PyBindGen wrappers for ns3::UintegerValue (module 'core')."""
    ## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue() [constructor]
    cls.add_constructor([])
    ## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(uint64_t const & value) [constructor]
    cls.add_constructor([param('uint64_t const &', 'value')])
    ## uinteger.h (module 'core'): ns3::UintegerValue::UintegerValue(ns3::UintegerValue const & arg0) [constructor]
    cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
    ## uinteger.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::UintegerValue::Copy() const [member function]
    cls.add_method('Copy',
                   'ns3::Ptr< ns3::AttributeValue >',
                   [],
                   is_const=True, is_virtual=True)
    ## uinteger.h (module 'core'): bool ns3::UintegerValue::DeserializeFromString(std::string value, ns3::Ptr<const ns3::AttributeChecker> checker) [member function]
    cls.add_method('DeserializeFromString',
                   'bool',
                   [param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_virtual=True)
    ## uinteger.h (module 'core'): uint64_t ns3::UintegerValue::Get() const [member function]
    cls.add_method('Get',
                   'uint64_t',
                   [],
                   is_const=True)
    ## uinteger.h (module 'core'): std::string ns3::UintegerValue::SerializeToString(ns3::Ptr<const ns3::AttributeChecker> checker) const [member function]
    cls.add_method('SerializeToString',
                   'std::string',
                   [param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
                   is_const=True, is_virtual=True)
    ## uinteger.h (module 'core'): void ns3::UintegerValue::Set(uint64_t const & value) [member function]
    cls.add_method('Set',
                   'void',
                   [param('uint64_t const &', 'value')])
    return
def register_Ns3BooleanProbe_methods(root_module, cls):
    """Register PyBindGen wrappers for ns3::BooleanProbe (module 'stats')."""
    ## boolean-probe.h (module 'stats'): ns3::BooleanProbe::BooleanProbe(ns3::BooleanProbe const & arg0) [constructor]
    cls.add_constructor([param('ns3::BooleanProbe const &', 'arg0')])
    ## boolean-probe.h (module 'stats'): ns3::BooleanProbe::BooleanProbe() [constructor]
    cls.add_constructor([])
    ## boolean-probe.h (module 'stats'): bool ns3::BooleanProbe::ConnectByObject(std::string traceSource, ns3::Ptr<ns3::Object> obj) [member function]
    cls.add_method('ConnectByObject',
                   'bool',
                   [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')],
                   is_virtual=True)
    ## boolean-probe.h (module 'stats'): void ns3::BooleanProbe::ConnectByPath(std::string path) [member function]
    cls.add_method('ConnectByPath',
                   'void',
                   [param('std::string', 'path')],
                   is_virtual=True)
    ## boolean-probe.h (module 'stats'): static ns3::TypeId ns3::BooleanProbe::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## boolean-probe.h (module 'stats'): bool ns3::BooleanProbe::GetValue() const [member function]
    cls.add_method('GetValue',
                   'bool',
                   [],
                   is_const=True)
    ## boolean-probe.h (module 'stats'): void ns3::BooleanProbe::SetValue(bool value) [member function]
    cls.add_method('SetValue',
                   'void',
                   [param('bool', 'value')])
    ## boolean-probe.h (module 'stats'): static void ns3::BooleanProbe::SetValueByPath(std::string path, bool value) [member function]
    cls.add_method('SetValueByPath',
                   'void',
                   [param('std::string', 'path'), param('bool', 'value')],
                   is_static=True)
    return
def register_Ns3CallbackImpl__Ns3ObjectBase___star___Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register wrappers for the CallbackImpl<ns3::ObjectBase*> specialization (module 'core')."""
    ## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackImpl< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    ## callback.h (module 'core'): static std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
    cls.add_method('DoGetTypeid',
                   'std::string',
                   [],
                   is_static=True)
    ## callback.h (module 'core'): std::string ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): ns3::ObjectBase * ns3::CallbackImpl<ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()() [member operator]
    cls.add_method('operator()',
                   'ns3::ObjectBase *',
                   [],
                   custom_name='__call__', is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3CallbackImpl__Void_Bool_Bool_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register wrappers for the CallbackImpl<void, bool, bool> specialization (module 'core')."""
    ## callback.h (module 'core'): ns3::CallbackImpl<void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImpl<void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackImpl< void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    ## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
    cls.add_method('DoGetTypeid',
                   'std::string',
                   [],
                   is_static=True)
    ## callback.h (module 'core'): std::string ns3::CallbackImpl<void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackImpl<void, bool, bool, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(bool arg0, bool arg1) [member operator]
    cls.add_method('operator()',
                   'void',
                   [param('bool', 'arg0'), param('bool', 'arg1')],
                   custom_name='__call__', is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3CallbackImpl__Void_Double_Double_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register wrappers for the CallbackImpl<void, double, double> specialization (module 'core')."""
    ## callback.h (module 'core'): ns3::CallbackImpl<void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImpl<void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackImpl< void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    ## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
    cls.add_method('DoGetTypeid',
                   'std::string',
                   [],
                   is_static=True)
    ## callback.h (module 'core'): std::string ns3::CallbackImpl<void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackImpl<void, double, double, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(double arg0, double arg1) [member operator]
    cls.add_method('operator()',
                   'void',
                   [param('double', 'arg0'), param('double', 'arg1')],
                   custom_name='__call__', is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3CallbackImpl__Void_Unsigned_char_Unsigned_char_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register wrappers for the CallbackImpl<void, unsigned char, unsigned char> specialization (module 'core')."""
    ## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackImpl< void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    ## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
    cls.add_method('DoGetTypeid',
                   'std::string',
                   [],
                   is_static=True)
    ## callback.h (module 'core'): std::string ns3::CallbackImpl<void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackImpl<void, unsigned char, unsigned char, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(unsigned char arg0, unsigned char arg1) [member operator]
    cls.add_method('operator()',
                   'void',
                   [param('unsigned char', 'arg0'), param('unsigned char', 'arg1')],
                   custom_name='__call__', is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3CallbackImpl__Void_Unsigned_int_Unsigned_int_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register wrappers for the CallbackImpl<void, unsigned int, unsigned int> specialization (module 'core')."""
    ## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackImpl< void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    ## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
    cls.add_method('DoGetTypeid',
                   'std::string',
                   [],
                   is_static=True)
    ## callback.h (module 'core'): std::string ns3::CallbackImpl<void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackImpl<void, unsigned int, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(unsigned int arg0, unsigned int arg1) [member operator]
    cls.add_method('operator()',
                   'void',
                   [param('unsigned int', 'arg0'), param('unsigned int', 'arg1')],
                   custom_name='__call__', is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3CallbackImpl__Void_Unsigned_short_Unsigned_short_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_Ns3Empty_methods(root_module, cls):
    """Register wrappers for the CallbackImpl<void, unsigned short, unsigned short> specialization (module 'core')."""
    ## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl() [constructor]
    cls.add_constructor([])
    ## callback.h (module 'core'): ns3::CallbackImpl<void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::CallbackImpl(ns3::CallbackImpl<void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> const & arg0) [constructor]
    cls.add_constructor([param('ns3::CallbackImpl< void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty > const &', 'arg0')])
    ## callback.h (module 'core'): static std::string ns3::CallbackImpl<void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::DoGetTypeid() [member function]
    cls.add_method('DoGetTypeid',
                   'std::string',
                   [],
                   is_static=True)
    ## callback.h (module 'core'): std::string ns3::CallbackImpl<void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::GetTypeid() const [member function]
    cls.add_method('GetTypeid',
                   'std::string',
                   [],
                   is_const=True, is_virtual=True)
    ## callback.h (module 'core'): void ns3::CallbackImpl<void, unsigned short, unsigned short, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty>::operator()(short unsigned int arg0, short unsigned int arg1) [member operator]
    cls.add_method('operator()',
                   'void',
                   [param('short unsigned int', 'arg0'), param('short unsigned int', 'arg1')],
                   custom_name='__call__', is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3DoubleProbe_methods(root_module, cls):
    """Register PyBindGen wrappers for ns3::DoubleProbe (module 'stats')."""
    ## double-probe.h (module 'stats'): ns3::DoubleProbe::DoubleProbe(ns3::DoubleProbe const & arg0) [constructor]
    cls.add_constructor([param('ns3::DoubleProbe const &', 'arg0')])
    ## double-probe.h (module 'stats'): ns3::DoubleProbe::DoubleProbe() [constructor]
    cls.add_constructor([])
    ## double-probe.h (module 'stats'): bool ns3::DoubleProbe::ConnectByObject(std::string traceSource, ns3::Ptr<ns3::Object> obj) [member function]
    cls.add_method('ConnectByObject',
                   'bool',
                   [param('std::string', 'traceSource'), param('ns3::Ptr< ns3::Object >', 'obj')],
                   is_virtual=True)
    ## double-probe.h (module 'stats'): void ns3::DoubleProbe::ConnectByPath(std::string path) [member function]
    cls.add_method('ConnectByPath',
                   'void',
                   [param('std::string', 'path')],
                   is_virtual=True)
    ## double-probe.h (module 'stats'): static ns3::TypeId ns3::DoubleProbe::GetTypeId() [member function]
    cls.add_method('GetTypeId',
                   'ns3::TypeId',
                   [],
                   is_static=True)
    ## double-probe.h (module 'stats'): double ns3::DoubleProbe::GetValue() const [member function]
    cls.add_method('GetValue',
                   'double',
                   [],
                   is_const=True)
    ## double-probe.h (module 'stats'): void ns3::DoubleProbe::SetValue(double value) [member function]
    cls.add_method('SetValue',
                   'void',
                   [param('double', 'value')])
    ## double-probe.h (module 'stats'): static void ns3::DoubleProbe::SetValueByPath(std::string path, double value) [member function]
    cls.add_method('SetValueByPath',
                   'void',
                   [param('std::string', 'path'), param('double', 'value')],
                   is_static=True)
    return
def register_Ns3HashImplementation_methods(root_module, cls):
    """Register wrappers for the abstract ns3::Hash::Implementation base (module 'core')."""
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
    cls.add_constructor([])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_pure_virtual=True, is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_pure_virtual=True, is_virtual=True)
    return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
    """Register wrappers for ns3::Hash::Function::Fnv1a (module 'core')."""
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
    ## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
    cls.add_constructor([])
    ## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('size_t const', 'size')],
                   is_virtual=True)
    ## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
    """Register wrappers for ns3::Hash::Function::Hash32 (module 'core')."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
    """Register wrappers for ns3::Hash::Function::Hash64 (module 'core')."""
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
    ## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
    cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
    ## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
    """Register wrappers for ns3::Hash::Function::Murmur3 (module 'core')."""
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [constructor]
    cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
    ## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
    cls.add_constructor([])
    ## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash32',
                   'uint32_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, std::size_t const size) [member function]
    cls.add_method('GetHash64',
                   'uint64_t',
                   [param('char const *', 'buffer'), param('std::size_t const', 'size')],
                   is_virtual=True)
    ## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
    cls.add_method('clear',
                   'void',
                   [],
                   is_virtual=True)
    return
def register_functions(root_module):
    """Register the module's free functions, then recurse into nested ns-3 namespaces."""
    module = root_module
    ## get-wildcard-matches.h (module 'stats'): std::string ns3::GetWildcardMatches(std::string const & configPath, std::string const & matchedPath, std::string const & wildcardSeparator=" ") [free function]
    module.add_function('GetWildcardMatches',
                        'std::string',
                        [param('std::string const &', 'configPath'), param('std::string const &', 'matchedPath'), param('std::string const &', 'wildcardSeparator', default_value='" "')])
    ## data-calculator.h (module 'stats'): bool ns3::isNaN(double x) [free function]
    module.add_function('isNaN',
                        'bool',
                        [param('double', 'x')])
    register_functions_ns3_FatalImpl(module.add_cpp_namespace('FatalImpl'), root_module)
    register_functions_ns3_Hash(module.add_cpp_namespace('Hash'), root_module)
    register_functions_ns3_TracedValueCallback(module.add_cpp_namespace('TracedValueCallback'), root_module)
    register_functions_ns3_internal(module.add_cpp_namespace('internal'), root_module)
    return
def register_functions_ns3_FatalImpl(module, root_module):
    """No free functions to register under ns3::FatalImpl."""
    return
def register_functions_ns3_Hash(module, root_module):
    """Recurse into the ns3::Hash::Function namespace; no functions at this level."""
    register_functions_ns3_Hash_Function(module.add_cpp_namespace('Function'), root_module)
    return
def register_functions_ns3_Hash_Function(module, root_module):
    """No free functions to register under ns3::Hash::Function."""
    return
def register_functions_ns3_TracedValueCallback(module, root_module):
    """No free functions to register under ns3::TracedValueCallback."""
    return
def register_functions_ns3_internal(module, root_module):
    """No free functions to register under ns3::internal."""
    return
def main():
    """Emit the generated C++ binding code for the stats module to stdout."""
    out = FileCodeSink(sys.stdout)
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)
    register_functions(root_module)
    root_module.generate(out)
# Script entry point: regenerate the bindings when run directly.
if __name__ == '__main__':
    main()
|
nsnam/ns-3-dev-git
|
src/stats/bindings/modulegen__gcc_LP64.py
|
Python
|
gpl-2.0
| 278,809
|
import pytest
class TestArping:
    """Bash-completion tests for the ``arping`` command."""
    @pytest.mark.complete("arping ")
    def test_1(self, completion):
        # Completing an argument position should yield at least one candidate.
        assert completion
    @pytest.mark.complete("arping -", require_cmd=True)
    def test_2(self, completion):
        # Option completion needs the real command installed (require_cmd).
        assert completion
|
scop/bash-completion
|
test/t/test_arping.py
|
Python
|
gpl-2.0
| 248
|
from unittest import TestCase
from pylons import request
from datetime import date
from bluechips.lib import helpers as h
class TestHelpers(TestCase):
    """Unit tests for the ``grab`` helper in bluechips.lib.helpers."""
    def test_grab_real_object(self):
        # grab() on a real object returns the attribute, or re-raises
        # AttributeError when the attribute is missing.
        class Foo(object):
            pass
        foo = Foo()
        foo.bar = 'some string'
        assert h.grab(foo, 'bar') == 'some string'
        try:
            h.grab(foo, 'baz')
        except AttributeError:
            pass
        else:
            raise AssertionError
    def test_grab_any_fake(self):
        # Falsy objects make grab() fall back to the empty string.
        assert h.grab(None, 'nonexistent') == ''
        assert h.grab('', 'nonexistent') == ''
    def test_grab_date(self):
        # The 'date' attribute defaults to today when no object is given.
        assert h.grab(None, 'date') == date.today()
    def test_grab_user(self):
        # The *_id attributes default to the id of the user stashed in the
        # (pylons) request environ; push a fake request to simulate that.
        class FakeRequest(object):
            pass
        class FakeUser(object):
            pass
        class SomeObject(object):
            pass
        req = FakeRequest()
        req.environ = {}
        req.environ['user'] = FakeUser()
        test_obj = SomeObject()
        req.environ['user'].id = test_obj
        request._push_object(req)
        assert h.grab(None, 'spender_id') == test_obj
        assert h.grab(None, 'creditor_id') == test_obj
        assert h.grab(None, 'debtor_id') == test_obj
        request._pop_object()
    def test_grab_amount(self):
        # The 'amount' attribute defaults to zero.
        assert h.grab(None, 'amount') == 0
|
ebroder/bluechips
|
bluechips/tests/lib/test_helpers.py
|
Python
|
gpl-2.0
| 1,342
|
# -*- coding: UTF-8 -*-
# CCcam Info by AliAbdul
from base64 import encodestring
from os import listdir, remove, rename, system, popen, path
from enigma import eListboxPythonMultiContent, eTimer, gFont, loadPNG, RT_HALIGN_RIGHT, getDesktop
from Components.ActionMap import ActionMap, NumberActionMap
from Components.config import config, getConfigListEntry
from Components.ConfigList import ConfigListScreen
from Components.Console import Console
from Components.Label import Label
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest, MultiContentEntryPixmapAlphaBlend
from Components.ScrollLabel import ScrollLabel
from Screens.HelpMenu import HelpableScreen
#from Screens.InfoBar import InfoBar
from Screens.LocationBox import LocationBox
from Screens.MessageBox import MessageBox
from Screens.Screen import Screen
from Screens.VirtualKeyBoard import VirtualKeyBoard
from Tools.Directories import fileExists, SCOPE_ACTIVE_SKIN, resolveFilename
from twisted.internet import reactor
from twisted.web.client import HTTPClientFactory
from urlparse import urlparse, urlunparse
#TOGGLE_SHOW = InfoBar.toggleShow
# Plugin version information, shown in the About screen.
VERSION = "v2"
DATE = "21.11.2014"
#############################################################
# Desktop width decides between full-HD (1920px) and SD/HD list layouts.
screenwidth = getDesktop(0).size().width()
def confPath():
    """Locate the first CCcam.cfg under /usr, /var or /etc.

    Returns the path as a string, or None when no config file is found.
    """
    roots = " ".join(["/usr", "/var", "/etc"])
    first_hit = popen('find %s -name "CCcam.cfg" | head -n 1' % roots).read()
    if not first_hit:
        return None
    return first_hit.replace("\n", "")
def _parse(url):
    """Split *url* into (scheme, host, port, path, username, password).

    Credentials embedded as ``user:pass@host`` are extracted; the port
    defaults to 80 and the path to "/".
    """
    pieces = urlparse(url.strip())
    scheme = pieces[0]
    location = urlunparse(('', '') + pieces[2:])
    netloc = pieces[1]
    port = 80
    username = password = ""
    if '@' in netloc:
        # Peel "user[:password]@" off the front of the network location.
        username, netloc = netloc.split('@')
        if ':' in username:
            username, password = username.split(':')
    if ':' in netloc:
        netloc, port = netloc.split(':')
        port = int(port)
    if location == "":
        location = "/"
    return scheme, netloc, port, location, username, password
def getPage(url, contextFactory=None, *args, **kwargs):
    """Fetch *url* with twisted's HTTPClientFactory and return its deferred.

    When the URL embeds ``user:password`` credentials, they are stripped
    from the URL and sent as an HTTP Basic Authorization header instead.
    """
    scheme, host, port, path, username, password = _parse(url)
    if username and password:
        # Rebuild the URL without the credentials part.
        url = scheme + '://' + host + ':' + str(port) + path
        basicAuth = encodestring("%s:%s" % (username, password))
        authHeader = "Basic " + basicAuth.strip()
        AuthHeaders = {"Authorization": authHeader}
        # "in" works on Python 2 and 3; dict.has_key() was removed in Python 3.
        if "headers" in kwargs:
            kwargs["headers"].update(AuthHeaders)
        else:
            kwargs["headers"] = AuthHeaders
    factory = HTTPClientFactory(url, *args, **kwargs)
    reactor.connectTCP(host, port, factory)
    return factory.deferred
def searchConfig():
    """Locate CCcam.cfg and cache its path in the CFG / CFG_path globals.

    CFG is the full path (or None when not found); CFG_path is its
    directory, defaulting to /var/etc.
    """
    global CFG, CFG_path
    CFG = confPath()
    CFG_path = '/var/etc'
    if CFG:
        CFG_path = path.dirname(CFG)
#############################################################
class HelpableNumberActionMap(NumberActionMap):
    """NumberActionMap that also registers its actions in the parent's help list."""
    def __init__(self, parent, context, actions, prio):
        # *actions* maps action-name -> (callable, helptext); split it into
        # the callable dict for the base ActionMap and the help entries.
        alist = []
        adict = {}
        for (action, funchelp) in actions.iteritems():
            alist.append((action, funchelp[1]))
            adict[action] = funchelp[0]
        NumberActionMap.__init__(self, [context], adict, prio)
        parent.helpList.append((self, context, alist))
#############################################################
# (english, localized) phrase pairs used by translateBlock() to localize the
# raw text blocks returned by the CCcam web interface.
TranslationHelper = [
	["Current time", _("Current time")],
	["NodeID", _("NodeID")],
	["Uptime", _("Uptime")],
	["Connected clients", _("Connected clients")],
	["Active clients", _("Active clients")],
	["Total handled client ecm's", _("Total handled client ecm's")],
	["Total handled client emm's", _("Total handled client emm's")],
	["Peak load (max queued requests per workerthread)", _("Peak load (max queued requests per workerthread)")],
	["card reader", _("card reader")],
	["no or unknown card inserted", _("no or unknown card inserted")],
	["system:", _("system:")],
	["caid:", _("caid:")],
	["provider:", _("provider:")],
	["provid:", _("provid:")],
	["using:", _("using:")],
	["address:", _("address:")],
	["hops:", _("hops:")],
	["pid:", _("pid:")],
	["share:", _("share:")],
	["handled", _("handled")],
	[" and", _(" and")],
	["card", _("card")],
	["Cardserial", _("Cardserial")],
	["ecm time:", _("ecm time:")]]
def translateBlock(block):
    """Localize known English CCcam status phrases inside *block*.

    Every (english, localized) pair from TranslationHelper is substituted;
    phrases not present in the text are left untouched.
    """
    for english, localized in TranslationHelper:
        # "in" instead of calling __contains__() directly (same semantics).
        if english in block:
            block = block.replace(english, localized)
    return block
#############################################################
def getConfigValue(l):
    """Extract the value part of a ``key : value  # comment`` config line.

    Returns the text after the first ':' with any trailing '#' comment,
    CR/LF characters and surrounding spaces removed; "" when the line has
    no ':' separator.
    """
    # Avoid shadowing the builtin name "list" (the original did).
    parts = l.split(":")
    ret = ""
    if len(parts) > 1:
        ret = parts[1].replace("\n", "").replace("\r", "")
        # Drop an inline comment; split() is a no-op when there is no '#'.
        ret = ret.split("#", 1)[0]
        # strip(" ") replaces the original while-loops; only spaces are
        # stripped, exactly as before (not tabs).
        ret = ret.strip(" ")
    return ret
#############################################################
def notBlackListed(entry):
    """Return True unless *entry* appears verbatim (as a full line) in the
    configured blacklist file."""
    try:
        f = open(config.cccaminfo.blacklist.value, "r")
        content = f.read().split("\n")
        f.close()
    except:
        # Deliberately best-effort: a missing or unreadable blacklist (or a
        # broken config object) blocks nothing.
        content = []
    # Membership test replaces the original manual flag loop.
    return entry not in content
#############################################################
# Entries of the CCcamInfo main menu, in display order; indices into this
# list select the screen to open.
menu_list = [
	_("General"),
	_("Clients"),
	_("Active clients"),
	_("Servers"),
	_("Shares"),
	_("Share View"),
	_("Extended Shares"),
	_("Providers"),
	_("Entitlements"),
	_("ecm.info"),
	_("Menu config"),
	_("Local box"),
	_("Remote box"),
	_("Free memory"),
	_("Switch config"),
	_("About")]
#############################################################
# Preload the lock icons, preferring the active skin's versions and falling
# back to the default skin.
if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_on.png")):
	lock_on = loadPNG(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_on.png"))
else:
	lock_on = loadPNG("/usr/share/enigma2/skin_default/icons/lock_on.png")
if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_off.png")):
	lock_off = loadPNG(resolveFilename(SCOPE_ACTIVE_SKIN, "icons/lock_off.png"))
else:
	lock_off = loadPNG("/usr/share/enigma2/skin_default/icons/lock_off.png")
def getConfigNameAndContent(fileName):
    """Read a CCcam config file and derive its display name.

    Returns (name, content). If the file starts with the magic header
    "#CONFIGFILE NAME=<name>", that name is used and CR characters in the
    content are normalized to LF; otherwise the name falls back to the
    file name relative to CFG_path. Unreadable files yield empty content.
    """
    try:
        # "with" closes the handle even if read() raises (the original
        # leaked the descriptor in that case).
        with open(fileName, "r") as f:
            content = f.read()
    except:
        # Best effort, as before: any read error yields empty content.
        content = ""
    header = "#CONFIGFILE NAME="
    if content.startswith(header):
        content = content.replace("\r", "\n")
        # Name is everything between the header and the first newline; a
        # header line without a trailing newline no longer raises ValueError.
        name = content[len(header):].split("\n", 1)[0]
    else:
        name = fileName.replace(CFG_path + "/", "")
    return name, content
#############################################################
class CCcamList(MenuList):
 """Multi-content menu list; row height and fonts scale with the skin."""
 def __init__(self, list):
  MenuList.__init__(self, list, False, eListboxPythonMultiContent)
  # Full-HD (1920px wide) skins get 1.5x taller rows
  self.l.setItemHeight(45 if screenwidth and screenwidth == 1920 else 30)
  self.l.setFont(0, gFont("Regular", 20))
  self.l.setFont(1, gFont("Regular", 30))
class CCcamShareList(MenuList):
 """Multi-line share list; taller rows hold several text cells per entry."""
 def __init__(self, list):
  MenuList.__init__(self, list, False, eListboxPythonMultiContent)
  # Full-HD (1920px wide) skins get 1.5x taller rows
  self.l.setItemHeight(90 if screenwidth and screenwidth == 1920 else 60)
  self.l.setFont(0, gFont("Regular", 18))
  self.l.setFont(1, gFont("Regular", 27))
class CCcamConfigList(MenuList):
 """List of selectable CCcam config files with lock-state icons."""
 def __init__(self, list):
  MenuList.__init__(self, list, False, eListboxPythonMultiContent)
  # Full-HD (1920px wide) skins get 1.5x taller rows
  self.l.setItemHeight(45 if screenwidth and screenwidth == 1920 else 30)
  self.l.setFont(0, gFont("Regular", 20))
  self.l.setFont(1, gFont("Regular", 30))
class CCcamShareViewList(MenuList):
 """Provider summary list used by the Share View screen."""
 def __init__(self, list):
  MenuList.__init__(self, list, False, eListboxPythonMultiContent)
  # Full-HD (1920px wide) skins get 1.5x taller rows
  self.l.setItemHeight(45 if screenwidth and screenwidth == 1920 else 30)
  self.l.setFont(0, gFont("Regular", 18))
  self.l.setFont(1, gFont("Regular", 27))
def CCcamListEntry(name, idx):
 """Build a main-menu row: a number/color/menu/info key icon plus the text.

 Indexes 0-9 keep their digit icon; 10-15 map to the color and special
 button icons.
 """
 res = [name]
 special_keys = {10: "red", 11: "green", 12: "yellow", 13: "blue", 14: "menu", 15: "info"}
 idx = special_keys.get(idx, idx)
 # prefer the active skin's button graphic, fall back to the default skin
 if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % str(idx))):
  png = resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % str(idx))
 else:
  png = "/usr/share/enigma2/skin_default/buttons/key_%s.png" % str(idx)
 if screenwidth and screenwidth == 1920:
  if fileExists(png):
   res.append(MultiContentEntryPixmapAlphaBlend(pos=(8, 3), size=(45, 45), png=loadPNG(png)))
  res.append(MultiContentEntryText(pos=(68, 5), size=(900, 45), font=1, text=name))
 else:
  if fileExists(png):
   res.append(MultiContentEntryPixmapAlphaBlend(pos=(5, 2), size=(30, 30), png=loadPNG(png)))
  res.append(MultiContentEntryText(pos=(45, 3), size=(500, 30), font=0, text=name))
 return res
def CCcamServerListEntry(name, color):
 """Build a one-line server row with a colored status button in front
 (caller picks red/blue/green for offline/no-cards/online)."""
 entry = [name]
 # prefer the active skin's button graphic, fall back to the default skin
 if path.exists(resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % color)):
  png = resolveFilename(SCOPE_ACTIVE_SKIN, "buttons/key_%s.png" % color)
 else:
  png = "/usr/share/enigma2/skin_default/buttons/key_%s.png" % color
 if screenwidth and screenwidth == 1920:
  if fileExists(png):
   entry.append(MultiContentEntryPixmapAlphaBlend(pos=(8, 3), size=(45, 45), png=loadPNG(png)))
  entry.append(MultiContentEntryText(pos=(68, 5), size=(900, 45), font=1, text=name))
 else:
  if fileExists(png):
   entry.append(MultiContentEntryPixmapAlphaBlend(pos=(5, 2), size=(30, 30), png=loadPNG(png)))
  entry.append(MultiContentEntryText(pos=(45, 3), size=(500, 30), font=0, text=name))
 return entry
def CCcamShareListEntry(hostname, type, caid, system, uphops, maxdown):
 # Build a multi-row share entry (host/type, caid/system, uphops/maxdown)
 # for CCcamShareList, with geometry per skin resolution.
 # NOTE(review): in the 1920px branch the 'Uphops' cell (at x=0) carries
 # RT_HALIGN_RIGHT while the SD branch leaves the same cell left-aligned
 # — looks inconsistent; confirm which alignment is intended.
 if screenwidth and screenwidth == 1920:
  res = [(hostname, type, caid, system, uphops, maxdown),
   MultiContentEntryText(pos=(0, 0), size=(450, 38), font=1, text=hostname),
   MultiContentEntryText(pos=(450, 0), size=(450, 38), font=1, text=_("Type: ") + type, flags=RT_HALIGN_RIGHT),
   MultiContentEntryText(pos=(0, 30), size=(450, 38), font=1, text=_("CaID: ") + caid),
   MultiContentEntryText(pos=(450, 30), size=(450, 38), font=1, text=_("System: ") + system, flags=RT_HALIGN_RIGHT),
   MultiContentEntryText(pos=(0, 60), size=(450, 38), font=1, text=_("Uphops: ") + uphops, flags=RT_HALIGN_RIGHT),
   MultiContentEntryText(pos=(450, 60), size=(450, 38), font=1, text=_("Maxdown: ") + maxdown, flags=RT_HALIGN_RIGHT)]
  return res
 else:
  res = [(hostname, type, caid, system, uphops, maxdown),
   MultiContentEntryText(pos=(0, 0), size=(300, 25), font=0, text=hostname),
   MultiContentEntryText(pos=(300, 0), size=(300, 25), font=0, text=_("Type: ") + type, flags=RT_HALIGN_RIGHT),
   MultiContentEntryText(pos=(0, 20), size=(300, 25), font=0, text=_("CaID: ") + caid),
   MultiContentEntryText(pos=(300, 20), size=(300, 25), font=0, text=_("System: ") + system, flags=RT_HALIGN_RIGHT),
   MultiContentEntryText(pos=(0, 40), size=(300, 25), font=0, text=_("Uphops: ") + uphops),
   MultiContentEntryText(pos=(300, 40), size=(300, 25), font=0, text=_("Maxdown: ") + maxdown, flags=RT_HALIGN_RIGHT)]
  return res
def CCcamShareViewListEntry(caidprovider, providername, numberofcards, numberofreshare):
 """Build a provider summary row: name, card count and reshare count
 (counts right-aligned), with geometry per skin resolution."""
 entry = (caidprovider, providername, numberofcards)
 if screenwidth and screenwidth == 1920:
  return [entry,
   MultiContentEntryText(pos=(10, 0), size=(750, 38), font=1, text=providername),
   MultiContentEntryText(pos=(750, 0), size=(75, 38), font=1, text=numberofcards, flags=RT_HALIGN_RIGHT),
   MultiContentEntryText(pos=(825, 0), size=(75, 38), font=1, text=numberofreshare, flags=RT_HALIGN_RIGHT)]
 return [entry,
  MultiContentEntryText(pos=(0, 0), size=(500, 25), font=0, text=providername),
  MultiContentEntryText(pos=(500, 0), size=(50, 25), font=0, text=numberofcards, flags=RT_HALIGN_RIGHT),
  MultiContentEntryText(pos=(550, 0), size=(50, 25), font=0, text=numberofreshare, flags=RT_HALIGN_RIGHT)]
def CCcamConfigListEntry(file):
 """Build a config-file row; a closed lock icon marks the file whose
 content matches the currently active CFG."""
 res = [file]
 try:
  f = open(CFG, "r")
  active = f.read()
  f.close()
 except:
  active = ""
 name, content = getConfigNameAndContent(file)
 png = lock_on if content == active else lock_off
 if screenwidth and screenwidth == 1920:
  res.append(MultiContentEntryPixmapAlphaBlend(pos=(8, 3), size=(38, 38), png=png))
  res.append(MultiContentEntryText(pos=(53, 5), size=(800, 38), font=1, text=name))
 else:
  res.append(MultiContentEntryPixmapAlphaBlend(pos=(5, 2), size=(25, 25), png=png))
  res.append(MultiContentEntryText(pos=(35, 3), size=(550, 25), font=0, text=name))
 return res
def CCcamMenuConfigListEntry(name, blacklisted):
 """Build a menu-config row; an open lock means the entry is blacklisted
 (hidden from the main menu)."""
 png = lock_off if blacklisted else lock_on
 res = [name]
 if screenwidth and screenwidth == 1920:
  res.append(MultiContentEntryPixmapAlphaBlend(pos=(8, 3), size=(38, 38), png=png))
  res.append(MultiContentEntryText(pos=(53, 5), size=(800, 38), font=1, text=name))
 else:
  res.append(MultiContentEntryPixmapAlphaBlend(pos=(5, 2), size=(25, 25), png=png))
  res.append(MultiContentEntryText(pos=(35, 3), size=(550, 25), font=0, text=name))
 return res
#############################################################
class CCcamInfoMain(Screen):
 """Main menu of the CCcam Info plugin.

 Builds the menu from the non-blacklisted entries of menu_list and
 dispatches each selection to the matching page of CCcam's web
 interface (fetched asynchronously via getPage) or to a sub-screen.
 self.working guards against re-entry while a request is in flight.
 """
 def __init__(self, session):
  Screen.__init__(self, session)
  Screen.setTitle(self, _("CCcam Info"))
  self.session = session
  self["menu"] = CCcamList([])
  self.working = False
  self.Console = Console()
  searchConfig()
  # use the stored remote profile when configured, else derive the
  # local box URL from the CCcam config file
  if config.cccaminfo.profile.value == "":
   self.readConfig()
  else:
   self.url = config.cccaminfo.profile.value
  self["actions"] = NumberActionMap(["CCcamInfoActions"],
   {
    "1": self.keyNumberGlobal,
    "2": self.keyNumberGlobal,
    "3": self.keyNumberGlobal,
    "4": self.keyNumberGlobal,
    "5": self.keyNumberGlobal,
    "6": self.keyNumberGlobal,
    "7": self.keyNumberGlobal,
    "8": self.keyNumberGlobal,
    "9": self.keyNumberGlobal,
    "0": self.keyNumberGlobal,
    "red": self.red,
    "green": self.green,
    "yellow": self.yellow,
    "blue": self.blue,
    "menu": self.menu,
    "info": self.info,
    "ok": self.okClicked,
    "cancel": self.close,
    "up": self.up,
    "down": self.down,
    "left": self.left,
    "right": self.right
   }, -2)
  self.onLayoutFinish.append(self.updateMenuList)
 def updateMenuList(self):
  """Rebuild the visible menu, skipping blacklisted entries."""
  self.working = True
  self.menu_list = []
  # NOTE(review): this loop iterates the list just emptied above, so it
  # never executes — apparently dead code; confirm and drop.
  for x in self.menu_list:
   del self.menu_list[0]
  list = []
  idx = 0
  for x in menu_list:
   if notBlackListed(x):
    list.append(CCcamListEntry(x, idx))
    self.menu_list.append(x)
    idx += 1
  self["menu"].setList(list)
  self.working = False
 def readConfig(self):
  """Derive the local webinfo URL (port and credentials) from CFG and
  reset the stored remote profile."""
  self.url = "http://127.0.0.1:16001"
  username = None
  password = None
  try:
   f = open(CFG, 'r')
   for l in f:
    if l.startswith('WEBINFO LISTEN PORT :'):
     port = getConfigValue(l)
     if port != "":
      self.url = self.url.replace('16001', port)
    elif l.startswith('WEBINFO USERNAME :'):
     username = getConfigValue(l)
    elif l.startswith('WEBINFO PASSWORD :'):
     password = getConfigValue(l)
   f.close()
  except:
   pass
  # embed credentials in the URL only when both are present and non-empty
  if (username is not None) and (password is not None) and (username != "") and (password != ""):
   self.url = self.url.replace('http://', ("http://%s:%s@" % (username, password)))
  config.cccaminfo.profile.value = ""
  config.cccaminfo.profile.save()
 def profileSelected(self, url=None):
  # callback of CCcamInfoRemoteBoxMenu: switch to the chosen profile
  if url is not None:
   self.url = url
   config.cccaminfo.profile.value = self.url
   config.cccaminfo.profile.save()
   self.showInfo(_("New profile: ") + url, _("Profile"))
  else:
   self.showInfo(_("Using old profile: ") + self.url, _("Profile"))
 def keyNumberGlobal(self, idx):
  """Dispatch menu entry *idx*; ignored while another request runs."""
  if self.working == False and (idx < len(self.menu_list)):
   self.working = True
   sel = self.menu_list[idx]
   if sel == _("General"):
    getPage(self.url).addCallback(self.showCCcamGeneral).addErrback(self.getWebpageError)
   elif sel == _("Clients"):
    getPage(self.url + "/clients").addCallback(self.showCCcamClients).addErrback(self.getWebpageError)
   elif sel == _("Active clients"):
    getPage(self.url + "/activeclients").addCallback(self.showCCcamClients).addErrback(self.getWebpageError)
   elif sel == _("Servers"):
    getPage(self.url + "/servers").addCallback(self.showCCcamServers).addErrback(self.getWebpageError)
   elif sel == _("Shares"):
    getPage(self.url + "/shares").addCallback(self.showCCcamShares).addErrback(self.getWebpageError)
   elif sel == _("Share View"):
    self.session.openWithCallback(self.workingFinished, CCcamShareViewMenu, self.url)
   elif sel == _("Extended Shares"):
    self.session.openWithCallback(self.workingFinished, CCcamInfoShareInfo, "None", self.url)
   elif sel == _("Providers"):
    getPage(self.url + "/providers").addCallback(self.showCCcamProviders).addErrback(self.getWebpageError)
   elif sel == _("Entitlements"):
    getPage(self.url + "/entitlements").addCallback(self.showCCcamEntitlements).addErrback(self.getWebpageError)
   elif sel == _("ecm.info"):
    self.session.openWithCallback(self.showEcmInfoFile, CCcamInfoEcmInfoSelection)
   elif sel == _("Menu config"):
    self.session.openWithCallback(self.updateMenuList, CCcamInfoMenuConfig)
   elif sel == _("Local box"):
    self.readConfig()
    self.showInfo(_("Profile: Local box"), _("Local box"))
   elif sel == _("Remote box"):
    self.session.openWithCallback(self.profileSelected, CCcamInfoRemoteBoxMenu)
   elif sel == _("Free memory"):
    if not self.Console:
     self.Console = Console()
    self.working = True
    self.Console.ePopen("free", self.showFreeMemory)
   elif sel == _("Switch config"):
    self.session.openWithCallback(self.workingFinished, CCcamInfoConfigSwitcher)
   else:
    self.showInfo(_("CCcam Info %s\nby AliAbdul %s\n\nThis plugin shows you the status of your CCcam.") % (VERSION, DATE), _("About"))
 # color/menu/info keys map to the fixed menu indexes 10-15
 def red(self):
  self.keyNumberGlobal(10)
 def green(self):
  self.keyNumberGlobal(11)
 def yellow(self):
  self.keyNumberGlobal(12)
 def blue(self):
  self.keyNumberGlobal(13)
 def menu(self):
  self.keyNumberGlobal(14)
 def info(self):
  self.keyNumberGlobal(15)
 def okClicked(self):
  self.keyNumberGlobal(self["menu"].getSelectedIndex())
 # navigation is blocked while a request is running
 def up(self):
  if not self.working:
   self["menu"].up()
 def down(self):
  if not self.working:
   self["menu"].down()
 def left(self):
  if not self.working:
   self["menu"].pageUp()
 def right(self):
  if not self.working:
   self["menu"].pageDown()
 def getWebpageError(self, error=""):
  # errback of every getPage call above (Python 2 print statement)
  print str(error)
  self.session.openWithCallback(self.workingFinished, MessageBox, _("Error reading webpage!"), MessageBox.TYPE_ERROR)
 def showFile(self, file):
  """Show the content of *file* in the info viewer."""
  try:
   f = open(file, "r")
   content = f.read()
   f.close()
  except:
   content = _("Could not open the file %s!") % file
  self.showInfo(translateBlock(content), " ")
 def showEcmInfoFile(self, file=None):
  # callback of CCcamInfoEcmInfoSelection with the chosen /tmp file name
  if file is not None:
   self.showFile("/tmp/"+file)
  self.workingFinished()
 def showCCcamGeneral(self, html):
  """Parse the main web page, then fetch /shares for the version info."""
  if html.__contains__('<BR><BR>'):
   idx = html.index('<BR><BR>')
   idx2 = html.index('<BR></BODY>')
   html = html[idx+8:idx2].replace("<BR>", "\n").replace("\n\n", "\n")
   self.infoToShow = html
   getPage(self.url + "/shares").addCallback(self.showCCcamGeneral2).addErrback(self.getWebpageError)
  else:
   self.showInfo(_("Error reading webpage!"), _("Error"))
 def showCCcamGeneral2(self, html):
  """Extract version and share count from /shares and show the result."""
  if html.__contains__("Welcome to CCcam"):
   idx = html.index("Welcome to CCcam")
   html = html[idx+17:]
   idx = html.index(" ")
   version = html[:idx]
   self.infoToShow = "%s%s\n%s" % (_("Version: "), version, self.infoToShow)
  if html.__contains__("Available shares:"):
   idx = html.index("Available shares:")
   html = html[idx+18:]
   idx = html.index("\n")
   html = html[:idx]
   self.showInfo(translateBlock("%s %s\n%s" % (_("Available shares:"), html, self.infoToShow)), _("General"))
  else:
   self.showInfo(translateBlock(self.infoToShow), _("General"))
 def showCCcamClients(self, html):
  """Parse the pipe-separated client table and open the sub menu."""
  firstLine = True
  clientList = []
  infoList = []
  lines = html.split("\n")
  for l in lines:
   if l.__contains__('|'):
    # the first '|' line is the table header
    if firstLine:
     firstLine = False
    else:
     list = l.split('|')
     if len(list) > 8:
      username = list[1].replace(" ", "")
      if username != "":
       hostname = list[2].replace(" ", "")
       connected = list[3].replace(" ", "")
       idleTime = list[4].replace(" ", "")
       ecm = list[5].replace(" ", "")
       emm = list[6].replace(" ", "")
       version = list[7].replace(" ", "")
       share = list[8].replace(" ", "")
       if version == "":
        version = "N/A"
       ecmEmm = "ECM: " + ecm + " - EMM: " + emm
       infoList.append([username, _("Hostname: ") + hostname, _("Connected: ") + connected, _("Idle Time: ") + idleTime, _("Version: ") + version, _("Last used share: ") + share, ecmEmm])
       clientList.append(username)
  self.set_title = _("CCcam Client Info")
  self.openSubMenu(clientList, infoList, self.set_title)
 def showCCcamServers(self, html):
  """Parse the pipe-separated server table and open the server menu."""
  firstLine = True
  infoList = []
  lines = html.split("\n")
  for l in lines:
   if l.__contains__('|'):
    if firstLine:
     firstLine = False
    else:
     list = l.split('|')
     if len(list) > 7:
      hostname = list[1].replace(" ", "")
      if hostname != "":
       connected = list[2].replace(" ", "")
       type = list[3].replace(" ", "")
       version = list[4].replace(" ", "")
       nodeid = list[5].replace(" ", "")
       cards = list[6].replace(" ", "")
       if version == "":
        version = "N/A"
       if nodeid == "":
        nodeid = "N/A"
       infoList.append([hostname, _("Cards: ") + cards, _("Type: ") + type, _("Version: ") + version, _("NodeID: ") + nodeid, _("Connected: ") + connected])
  self.session.openWithCallback(self.workingFinished, CCcamInfoServerMenu, infoList, self.url)
 def showCCcamShares(self, html):
  """Parse the pipe-separated share table and open the sub menu."""
  firstLine = True
  sharesList = []
  infoList = []
  lines = html.split("\n")
  for l in lines:
   if l.__contains__('|'):
    if firstLine:
     firstLine = False
    else:
     list = l.split('|')
     if len(list) > 7:
      hostname = list[1].replace(" ", "")
      if hostname != "":
       type = list[2].replace(" ", "")
       caid = list[3].replace(" ", "")
       system = list[4].replace(" ", "")
       # column 6 holds "<uphops> <maxdown>"; split manually
       string = list[6]
       while string.startswith(" "):
        string = string[1:]
       while string.endswith(" "):
        string = string[:-1]
       idx = string.index(" ")
       uphops = string[:idx]
       string = string[idx+1:]
       while string.startswith(" "):
        string = string[1:]
       maxdown = string
       # pad 3-digit CaIDs to the usual 4 digits
       if len(caid) == 3:
        caid = "0" + caid
       infoList.append([hostname, _("Type: ") + type, _("CaID: ") + caid, _("System: ") + system, _("Uphops: ") + uphops, _("Maxdown: ") + maxdown])
       sharesList.append(hostname + " - " + _("CaID: ") + caid)
  self.set_title = _("CCcam Shares Info")
  self.openSubMenu(sharesList, infoList, self.set_title)
 def showCCcamProviders(self, html):
  """Parse the pipe-separated provider table and open the sub menu."""
  firstLine = True
  providersList = []
  infoList = []
  lines = html.split("\n")
  for l in lines:
   if l.__contains__('|'):
    if firstLine:
     firstLine = False
    else:
     list = l.split('|')
     if len(list) > 5:
      caid = list[1].replace(" ", "")
      if caid != "":
       provider = list[2].replace(" ", "")
       providername = list[3].replace(" ", "")
       system = list[4].replace(" ", "")
       infoList.append([_("CaID: ") + caid, _("Provider: ") + provider, _("Provider Name: ") + providername, _("System: ") + system])
       providersList.append(_("CaID: ") + caid + " - " + _("Provider: ") + provider)
  self.set_title = _("CCcam Provider Info")
  self.openSubMenu(providersList, infoList, self.set_title)
 def showCCcamEntitlements(self, html):
  """Show the <PRE> block of the entitlements page as plain text."""
  if html.__contains__('<PRE>'):
   idx = html.index('<PRE>')
   idx2 = html.index('</PRE>')
   html = html[idx+5:idx2].replace("\n\n", "\n")
   if html == "":
    html = _("No card inserted!")
   self.showInfo(translateBlock(html), _("Entitlements"))
  else:
   self.showInfo(_("Error reading webpage!"), _("Entitlements"))
 def showInfo(self, info, set_title):
  self.session.openWithCallback(self.workingFinished, CCcamInfoInfoScreen, info, set_title)
 def openSubMenu(self, list, infoList, set_title):
  self.session.openWithCallback(self.workingFinished, CCcamInfoSubMenu, list, infoList, set_title)
 def workingFinished(self, callback=None):
  # generic close-callback: re-enable the menu
  self.working = False
 def showFreeMemory(self, result, retval, extra_args):
  """Console callback for 'free': format total/used/free memory."""
  if retval == 0:
   if result.__contains__("Total:"):
    idx = result.index("Total:")
    result = result[idx+6:]
    tmpList = result.split(" ")
    list = []
    for x in tmpList:
     if x != "":
      list.append(x)
    self.showInfo("%s\n\n %s %s\n %s %s\n %s %s" % (_("Free memory:"), _("Total:"), list[0], _("Used:"), list[1], _("Free:"), list[2]), _("Free memory"))
   else:
    self.showInfo(result, _("Free memory"))
  else:
   self.showInfo(str(result), _("Free memory"))
#############################################################
class CCcamInfoEcmInfoSelection(Screen):
 """Let the user pick one of the ecm*.info files in /tmp; closes with
 the selected file name (or None on cancel)."""
 def __init__(self, session):
  Screen.__init__(self, session)
  Screen.setTitle(self, _("CCcam ECM Info"))
  files = [x for x in listdir("/tmp/") if x.startswith('ecm') and x.endswith('.info')]
  self["list"] = MenuList(files)
  self["actions"] = ActionMap(["CCcamInfoActions"], {"ok": self.ok, "cancel": self.close}, -1)
 def ok(self):
  # hand the selected file name back to the caller
  self.close(self["list"].getCurrent())
#############################################################
class CCcamInfoInfoScreen(Screen):
 """Simple scrollable text viewer used for all plain-info pages."""
 def __init__(self, session, info, set_title):
  Screen.__init__(self, session)
  Screen.setTitle(self, set_title)
  self["text"] = ScrollLabel(info)
  # any direction key scrolls; OK/cancel closes
  self["actions"] = ActionMap(["CCcamInfoActions"],
   {
    "ok": self.close,
    "cancel": self.close,
    "up": self["text"].pageUp,
    "down": self["text"].pageDown,
    "left": self["text"].pageUp,
    "right": self["text"].pageDown,
   }, -1)
#############################################################
class CCcamShareViewMenu(Screen, HelpableScreen):
 """Share View: aggregate the /shares table per provider.

 self.uphop == -1 means 'show all cards'; 0-9 filters by uphop level.
 self.providers maps caid+provider keys to provider names (filled from
 the /providers page before /shares is parsed).
 """
 def __init__(self, session, url):
  Screen.__init__(self, session)
  HelpableScreen.__init__(self)
  self.session = session
  Screen.setTitle(self, _("CCcam Share Info"))
  self.url = url
  self.list = []
  self.providers = {}
  self.uphop = -1
  self.working = True
  self["list"] = CCcamShareViewList([])
  self["uphops"] = Label()
  self["cards"] = Label()
  self["providers"] = Label()
  self["reshare"] = Label()
  self["title"] = Label()
  self["actions"] = HelpableNumberActionMap(self, "CCcamInfoActions",
   {
    "cancel": (self.exit, _("close share view")),
    "0": (self.getUphop, _("show cards with uphop 0")),
    "1": (self.getUphop, _("show cards with uphop 1")),
    "2": (self.getUphop, _("show cards with uphop 2")),
    "3": (self.getUphop, _("show cards with uphop 3")),
    "4": (self.getUphop, _("show cards with uphop 4")),
    "5": (self.getUphop, _("show cards with uphop 5")),
    "6": (self.getUphop, _("show cards with uphop 6")),
    "7": (self.getUphop, _("show cards with uphop 7")),
    "8": (self.getUphop, _("show cards with uphop 8")),
    "9": (self.getUphop, _("show cards with uphop 9")),
    "green": (self.showAll, _("show all cards")),
    "incUphop": (self.incUphop, _("increase uphop by 1")),
    "decUphop": (self.decUphop, _("decrease uphop by 1")),
    "ok": (self.getServer, _("get the cards' server")),
   }, -1)
  self.onLayoutFinish.append(self.getProviders)
 def exit(self):
  if not self.working:
   self.close()
 def getProviders(self):
  # fetch /providers first; its callback chains on to /shares
  getPage(self.url + "/providers").addCallback(self.readProvidersCallback).addErrback(self.readError)
 def readError(self, error=None):
  self.session.open(MessageBox, _("Error reading webpage!"), MessageBox.TYPE_ERROR)
  self.working = False
 def readSharesCallback(self, html):
  """Aggregate the /shares table into per-provider card/reshare counts.

  NOTE(review): the uphop == -1 branch and the uphop-filtered branch
  duplicate the same accumulation logic; a future cleanup could merge
  them, but the statement order is preserved here.
  """
  firstLine = True
  providerList = []
  countList = []
  shareList = []
  reshareList = []
  self.hostList = []
  self.caidList = []
  count = 0
  totalcards = 0
  totalproviders = 0
  resharecards = 0
  numberofreshare = 0
  lines = html.split("\n")
  for l in lines:
   if l.__contains__('|'):
    # the first '|' line is the table header
    if firstLine:
     firstLine = False
    else:
     list = l.split("|")
     if len(list) > 7:
      hostname = list[1].replace(" ", "")
      if hostname != "":
       if self.uphop == -1:
        caid = list[3].replace(" ", "")
        provider = list[5].replace(" ", "")
        caidprovider = self.formatCaidProvider(caid, provider)
        # column 6 holds "<uphops> <maxdown>"; take maxdown
        string = list[6]
        while string.startswith(" "):
         string = string[1:]
        while string.endswith(" "):
         string = string[:-1]
        idx = string.index(" ")
        maxdown = string[idx+1:]
        while maxdown.startswith(" "):
         maxdown = maxdown[1:]
        down = maxdown
        if caidprovider not in providerList:
         # first card of this provider
         providerList.append(caidprovider)
         count = 1
         countList.append(count)
         numberofcards = count
         providername = self.providers.get(caidprovider, 'Multiple Providers given')
         #if providername == 'Multiple Providers given':
         # print caidprovider
         numberofreshare = 0
         if int(down)>0:
          resharecards += 1
          numberofreshare = 1
         reshareList.append(numberofreshare)
         shareList.append(CCcamShareViewListEntry(caidprovider, providername, str(numberofcards), str(numberofreshare)))
         self.list.append([caidprovider, providername, numberofcards, numberofreshare])
         totalproviders += 1
        elif caidprovider in providerList:
         # further card of a known provider: bump the counters
         i = providerList.index(caidprovider)
         count = countList[i]
         count += 1
         countList[i] = count
         numberofcards = count
         if int(down)>0:
          reshare = reshareList[i]
          reshare += 1
          reshareList[i] = reshare
          numberofreshare = 0
          numberofreshare = reshare
          resharecards +=1
         elif int(down)==0:
          numberofreshare = reshareList[i]
         providername = self.providers.get(caidprovider, 'Multiple Providers given')
         shareList[i] = CCcamShareViewListEntry(caidprovider, providername, str(numberofcards), str(numberofreshare))
        self.hostList.append(hostname)
        self.caidList.append(caidprovider)
        totalcards += 1
        ulevel = _("All")
       else:
        # uphop filter active: column 6 is "<uphops> <maxdown>"
        updown = list[6]
        while updown.startswith(" "):
         updown = updown[1:]
        while updown.endswith(" "):
         updown = updown[:-1]
        idx = updown.index(" ")
        up = updown[:idx]
        maxdown = updown[idx+1:]
        while maxdown.startswith(" "):
         maxdown = maxdown[1:]
        down = maxdown
        ulevel = str(self.uphop)
        if int(up) == self.uphop:
         caid = list[3].replace(" ", "")
         provider = list[5].replace(" ", "")
         caidprovider = self.formatCaidProvider(caid, provider)
         if caidprovider not in providerList:
          providerList.append(caidprovider)
          count = 1
          countList.append(count)
          numberofcards = count
          providername = self.providers.get(caidprovider, 'Multiple Providers given')
          #if providername == 'Multiple Providers given':
          # print caidprovider
          numberofreshare = 0
          if int(down)>0:
           resharecards += 1
           numberofreshare = 1
          reshareList.append(numberofreshare)
          shareList.append(CCcamShareViewListEntry(caidprovider, providername, str(numberofcards), str(numberofreshare)))
          self.list.append([caidprovider, providername, numberofcards, numberofreshare])
          totalproviders += 1
         elif caidprovider in providerList:
          i = providerList.index(caidprovider)
          count = countList[i]
          count += 1
          countList[i] = count
          numberofcards = count
          if int(down)>0:
           reshare = reshareList[i]
           reshare += 1
           #if caidprovider == "05021700":
           # print "re: %d" %(reshare)
           reshareList[i] = reshare
           numberofreshare = 0
           numberofreshare = reshare
           resharecards +=1
          elif int(down)==0:
           numberofreshare = reshareList[i]
          providername = self.providers.get(caidprovider, 'Multiple Providers given')
          shareList[i] = CCcamShareViewListEntry(caidprovider, providername, str(numberofcards), str(numberofreshare))
         self.hostList.append(hostname)
         self.caidList.append(caidprovider)
         totalcards += 1
         #maxdown = list[6]
         #while maxdown.startswith(" "):
         #maxdown = maxdown[1:]
         #down = maxdown
         #if int(down)>0:
         #resharecards +=1
  # publish the aggregated numbers in the screen labels
  self.instance.setTitle("%s (%s %d) %s %s" % (_("Share View"), _("Total cards:"), totalcards, _("Hops:"), ulevel))
  self["title"].setText("%s (%s %d) %s %s" % (_("Share View"), _("Total cards:"), totalcards, _("Hops:"), ulevel))
  self["list"].setList(shareList)
  self["uphops"].setText("%s %s" %(_("Hops:"), ulevel))
  self["cards"].setText("%s %s" %(_("Total cards:"), totalcards))
  self["providers"].setText("%s %s" %(_("Providers:"), totalproviders))
  self["reshare"].setText("%s %d" %(_("Reshare:"), resharecards))
  self.working = False
 def readProvidersCallback(self, html):
  """Fill self.providers from /providers, then fetch /shares."""
  firstLine = True
  lines = html.split("\n")
  for l in lines:
   if l.__contains__('|'):
    if firstLine:
     firstLine = False
    else:
     list = l.split('|')
     if len(list) > 5:
      caid = list[1].replace(" ", "")
      if caid != "":
       provider = list[2].replace(" ", "")
       providername = list[3]
       caidprovider = self.formatCaidProvider(caid, provider)
       self.providers.setdefault(caidprovider, providername)
  getPage(self.url + "/shares").addCallback(self.readSharesCallback).addErrback(self.readError)
 def formatCaidProvider(self, caid, provider):
  """Normalize caid+provider into the aggregation key used above
  (zero-padding, and caid-only keys for several CA systems)."""
  pos = provider.find(",")
  if pos != -1:
   provider = provider[pos+1:]
   pos = provider.find(",")
   if pos != -1:
    provider = provider[0:pos]
  # pad the provider id to 4 digits
  if len(provider) == 0:
   provider = "0000"
  elif len(provider) == 1:
   provider = "000" + provider
  elif len(provider) == 2:
   provider = "00" + provider
  elif len(provider) == 3:
   provider = "0" + provider
  if len(caid) == 3:
   caid = "0" + caid
  if caid.startswith("0500") and len(provider) == 5:
   caid = "050"
  elif caid.startswith("0500") and len(provider) == 6:
   caid = "05"
  # these CA systems are keyed by caid alone
  if caid.startswith("06"):
   caidprovider = caid
  elif caid.startswith("0d22"):
   caidprovider = caid
  elif caid.startswith("0d05"):
   caidprovider = caid
  elif caid.startswith("09"):
   caidprovider = caid
  elif caid.startswith("17"):
   caidprovider = caid
  elif caid.startswith("18"):
   caidprovider = caid
  elif caid.startswith("4a"):
   caidprovider = caid
  else:
   caidprovider = caid + provider
  return caidprovider
 def getUphop(self, uphop):
  # digit keys select a fixed uphop filter and reload
  self.uphop = uphop
  self.getProviders()
 def showAll(self):
  self.uphop = -1
  self.getProviders()
 def incUphop(self):
  if self.uphop < 9:
   self.uphop += 1
   self.getProviders()
 def decUphop(self):
  if self.uphop > -1:
   self.uphop -= 1
   self.getProviders()
 def getServer(self):
  """Show all server hostnames carrying the selected provider's cards."""
  server = _("Servers:") + " \n"
  sel = self["list"].getCurrent()
  if sel is not None:
   e = 0
   while e < len(self.caidList):
    if sel[0][0] == self.caidList[e]:
     pos = self.hostList[e].find(":")
     if pos != -1:
      server += self.hostList[e][0:pos] + "\n"
     else:
      server += self.hostList[e] + "\n"
    e += 1
  self.session.open(CCcamInfoInfoScreen, server, _("Servers"))
#############################################################
class CCcamInfoSubMenu(Screen):
 """Generic list screen: highlighting an entry shows its detail lines in
 the info label; OK pops them up in a message box."""
 def __init__(self, session, list, infoList, set_title):
  Screen.__init__(self, session)
  self.session = session
  Screen.setTitle(self, _(set_title))
  self.infoList = infoList
  self["list"] = MenuList(list)
  self["info"] = Label()
  self["actions"] = ActionMap(["CCcamInfoActions"], {"ok": self.okClicked, "cancel": self.close}, -1)
  self["list"].onSelectionChanged.append(self.showInfo)
  self.onLayoutFinish.append(self.showInfo)
 def okClicked(self):
  details = self.getInfo()
  if details != "":
   self.session.open(MessageBox, details, MessageBox.TYPE_INFO)
 def showInfo(self):
  self["info"].setText(self.getInfo())
 def getInfo(self):
  """Return the selected entry's detail lines, one per line; empty string
  when nothing is selected or the index is out of range."""
  try:
   details = self.infoList[self["list"].getSelectedIndex()]
   return "".join(line + "\n" for line in details)
  except:
   return ""
#############################################################
class CCcamInfoServerMenu(Screen):
 """Server list screen. Button color encodes status: red = offline,
 blue = online without cards, green = online with cards."""
 def __init__(self, session, infoList, url):
  Screen.__init__(self, session)
  self.session = session
  Screen.setTitle(self, _("CCcam Server Info"))
  self.infoList = infoList
  self.url = url
  entries = []
  for info in self.infoList:
   if info[5].replace(_("Connected: "), "") == "":
    color = "red"    # offline
   elif info[1] == _("Cards: 0"):
    color = "blue"   # online, no cards
   else:
    color = "green"  # online with cards
   entries.append(CCcamServerListEntry(info[0], color))
  self["list"] = CCcamList(entries)
  self["info"] = Label()
  self["actions"] = ActionMap(["CCcamInfoActions"], {"ok": self.okClicked, "cancel": self.close}, -1)
  self["list"].onSelectionChanged.append(self.showInfo)
  self.onLayoutFinish.append(self.showInfo)
 def showInfo(self):
  self["info"].setText(self.getInfo())
 def getInfo(self):
  """Return the selected server's detail lines, one per line."""
  try:
   details = self.infoList[self["list"].getSelectedIndex()]
   return "".join(line + "\n" for line in details)
  except:
   return ""
 def okClicked(self):
  # open the extended share list for the selected server
  sel = self["list"].getCurrent()
  if sel is not None:
   self.session.open(CCcamInfoShareInfo, sel[0], self.url)
#############################################################
class CCcamInfoRemoteBox:
 """Value object holding the connection profile of a remote CCcam box
 (display name, host, webinfo credentials and port)."""
 def __init__(self, name, ip, username, password, port):
  self.name = name
  self.ip = ip
  self.username = username
  self.password = password
  self.port = port
#############################################################
class CCcamInfoConfigMenu(ConfigListScreen, Screen):
 """Edit screen for one remote-box profile; closes with a new
 CCcamInfoRemoteBox on OK, or None on cancel."""
 def __init__(self, session, profile):
  Screen.__init__(self, session)
  Screen.setTitle(self, _("CCcam Info Setup"))
  # preload the config elements with the profile being edited
  config.cccaminfo.name.value = profile.name
  config.cccaminfo.ip.value = profile.ip
  config.cccaminfo.username.value = profile.username
  config.cccaminfo.password.value = profile.password
  config.cccaminfo.port.value = profile.port
  entries = [
   (_("Name:"), config.cccaminfo.name),
   (_("IP:"), config.cccaminfo.ip),
   (_("Username:"), config.cccaminfo.username),
   (_("Password:"), config.cccaminfo.password),
   (_("Port:"), config.cccaminfo.port)]
  ConfigListScreen.__init__(self, [getConfigListEntry(text, element) for text, element in entries])
  self["actions"] = ActionMap(["CCcamInfoActions"], {"ok": self.okClicked, "cancel": self.exit}, -2)
 def okClicked(self):
  # hand the edited values back as a fresh profile object
  self.close(CCcamInfoRemoteBox(config.cccaminfo.name.value, config.cccaminfo.ip.value, config.cccaminfo.username.value, config.cccaminfo.password.value, config.cccaminfo.port.value))
 def exit(self):
  self.close(None)
#############################################################
class CCcamInfoRemoteBoxMenu(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _("CCcam Remote Info"))
self.list = []
self.profiles = []
self["key_red"] = Label(_("Delete"))
self["key_green"] = Label(_("New"))
self["key_yellow"] = Label(_("Location"))
self["key_blue"] = Label(_("Edit"))
self["list"] = MenuList([])
self["actions"] = ActionMap(["CCcamInfoActions"],
{
"cancel": self.exit,
"ok": self.profileSelected,
"red": self.delete,
"green": self.new,
"yellow": self.location,
"blue": self.edit
}, -1)
self.onLayoutFinish.append(self.readProfiles)
def readProfiles(self):
try:
f = open(config.cccaminfo.profiles.value, "r")
content = f.read()
f.close()
except:
content = ""
profiles = content.split("\n")
for profile in profiles:
if profile.__contains__("|"):
tmp = profile.split("|")
if len(tmp) == 5:
name = tmp[0]
ip = tmp[1]
username = tmp[2]
password = tmp[3]
port = int(tmp[4])
self.list.append(name)
self.profiles.append(CCcamInfoRemoteBox(name, ip, username, password, port))
self["list"].setList(self.list)
def saveConfigs(self):
content = ""
for x in self.profiles:
content = "%s\n%s|%s|%s|%s|%d" % (content, x.name, x.ip, x.username, x.password, x.port)
try:
f = open(config.cccaminfo.profiles.value, "w")
f.write(content)
f.close()
except:
pass
def exit(self):
self.saveConfigs()
self.close(None)
def profileSelected(self):
self.saveConfigs()
if len(self.list) > 0:
idx = self["list"].getSelectionIndex()
cur = self.profiles[idx]
if cur.ip == "":
url = None
else:
if cur.username != "" and cur.password != "":
url = "http://%s:%s@%s:%d" % (cur.username, cur.password, cur.ip, cur.port)
else:
url = "http://%s:%d" % (cur.ip, cur.port)
self.close(url)
def delete(self):
if len(self.list) > 0:
idx = self["list"].getSelectionIndex()
del self.list[idx]
del self.profiles[idx]
self["list"].setList(self.list)
def new(self):
self.session.openWithCallback(self.newCallback, CCcamInfoConfigMenu, CCcamInfoRemoteBox("Profile", "192.168.2.12", "", "", 16001))
def newCallback(self, callback):
if callback:
self.list.append(callback.name)
self.profiles.append(callback)
self["list"].setList(self.list)
def location(self):
self.session.openWithCallback(self.locationCallback, LocationBox)
def locationCallback(self, callback):
    """Store the chosen directory and reload the profiles from there."""
    if callback:
        config.cccaminfo.profiles.value = ("%s/CCcamInfo.profiles"%callback).replace("//", "/")
        config.cccaminfo.profiles.save()
        # Drop the in-memory lists and re-read from the new location.
        del self.list
        self.list = []
        del self.profiles
        self.profiles = []
        self.readProfiles()
def edit(self):
    """Open the editor for the currently selected profile."""
    if len(self.list) > 0:
        idx = self["list"].getSelectionIndex()
        self.session.openWithCallback(self.editCallback, CCcamInfoConfigMenu, self.profiles[idx])
def editCallback(self, callback):
    """Replace the edited profile with the version returned by the editor.

    NOTE(review): the edited entry is re-appended at the end, so it loses
    its list position; this also assumes the selection did not change
    while the editor was open — confirm that is acceptable.
    """
    if callback:
        idx = self["list"].getSelectionIndex()
        del self.list[idx]
        del self.profiles[idx]
        self.list.append(callback.name)
        self.profiles.append(callback)
        self["list"].setList(self.list)
#############################################################
class CCcamInfoShareInfo(Screen):
    """Screen listing the shares offered by one (or all) CCcam peers.

    The share table is fetched from the CCcam web interface (/shares) and
    can be filtered by uphops / maxdown via the colored buttons; a filter
    value of -1 means "All".
    """

    def __init__(self, session, hostname, url):
        Screen.__init__(self, session)
        self.session = session
        Screen.setTitle(self, _("CCcam Share Info"))
        self.hostname = hostname
        self.url = url
        self.list = []        # cached raw share tuples, used for re-filtering
        self.uphops = -1      # -1 == no uphops filter
        self.maxdown = -1     # -1 == no maxdown filter
        self.working = True   # blocks input while fetching/filtering
        self["key_red"] = Label(_("Uphops +"))
        self["key_green"] = Label(_("Uphops -"))
        self["key_yellow"] = Label(_("Maxdown +"))
        self["key_blue"] = Label(_("Maxdown -"))
        self["list"] = CCcamShareList([])
        self["actions"] = ActionMap(["CCcamInfoActions"],
            {
                "cancel": self.exit,
                "red": self.uhopsPlus,
                "green": self.uhopsMinus,
                "yellow": self.maxdownPlus,
                "blue": self.maxdownMinus
            }, -1)
        self.onLayoutFinish.append(self.readShares)

    def exit(self):
        # Ignore close requests while a fetch/filter is in progress.
        if not self.working:
            self.close()

    def readShares(self):
        """Asynchronously fetch the /shares page from the CCcam box."""
        getPage(self.url + "/shares").addCallback(self.readSharesCallback).addErrback(self.readSharesError)

    def readSharesError(self, error=None):
        self.session.open(MessageBox, _("Error reading webpage!"), MessageBox.TYPE_ERROR)
        self.working = False

    def readSharesCallback(self, html):
        """Parse the pipe-separated share table and display all matching rows."""
        firstLine = True
        shareList = []
        count = 0
        for line in html.split("\n"):
            if '|' not in line:
                continue
            if firstLine:
                # Skip the table header row.
                firstLine = False
                continue
            fields = line.split("|")
            if len(fields) > 7:
                hostname = fields[1].replace(" ", "")
                if (self.hostname == "None" or self.hostname == hostname) and hostname != "":
                    shareType = fields[2].replace(" ", "")
                    caid = fields[3].replace(" ", "")
                    system = fields[4].replace(" ", "")
                    # Column 6 holds "<uphops> <maxdown>" with arbitrary space padding.
                    col = fields[6].strip(" ")
                    # NOTE(review): raises ValueError if the column contains no
                    # space; preserved from the original parsing logic.
                    idx = col.index(" ")
                    uphops = col[:idx]
                    maxdown = col[idx+1:].lstrip(" ")
                    if len(caid) == 3:
                        # Normalize 3-digit caids to 4 digits.
                        caid = "0" + caid
                    shareList.append(CCcamShareListEntry(hostname, shareType, caid, system, uphops, maxdown))
                    self.list.append([hostname, shareType, caid, system, uphops, maxdown])
                    count += 1
        self._updateShareView(shareList, count)

    def _updateShareView(self, shareList, count):
        """Show the given share list, update the window title, unblock input."""
        textUhops = _("All") if self.uphops < 0 else str(self.uphops)
        textMaxdown = _("All") if self.maxdown < 0 else str(self.maxdown)
        self.instance.setTitle("%s %d (%s%s / %s%s)" % (_("Available shares:"), count, _("Uphops: "), textUhops, _("Maxdown: "), textMaxdown))
        self["list"].setList(shareList)
        self.working = False

    def uhopsPlus(self):
        # Cycle 0..9, then wrap to -1 ("All").
        if not self.working:
            self.uphops += 1
            if self.uphops > 9:
                self.uphops = -1
            self.refreshList()

    def uhopsMinus(self):
        # Cycle 9..0, then wrap to -1 ("All") and back to 9.
        if not self.working:
            self.uphops -= 1
            if self.uphops < -1:
                self.uphops = 9
            self.refreshList()

    def maxdownPlus(self):
        if not self.working:
            self.maxdown += 1
            if self.maxdown > 9:
                self.maxdown = -1
            self.refreshList()

    def maxdownMinus(self):
        if not self.working:
            self.maxdown -= 1
            if self.maxdown < -1:
                self.maxdown = 9
            self.refreshList()

    def refreshList(self):
        """Re-filter the cached share tuples by the current uphops/maxdown."""
        shareList = []
        count = 0
        self.working = True
        for (hostname, shareType, caid, system, uphops, maxdown) in self.list:
            if (uphops == str(self.uphops) or self.uphops == -1) and (maxdown == str(self.maxdown) or self.maxdown == -1):
                shareList.append(CCcamShareListEntry(hostname, shareType, caid, system, uphops, maxdown))
                count += 1
        self._updateShareView(shareList, count)
#############################################################
class CCcamInfoConfigSwitcher(Screen):
    """Screen to manage multiple CCcam_*.cfg files and activate one of them."""

    def __init__(self, session):
        Screen.__init__(self, session)
        self.session = session
        Screen.setTitle(self, _("CCcam Config Switcher"))
        self["key_red"] = Label(_("Delete"))
        self["key_green"] = Label(_("Activate"))
        self["key_yellow"] = Label(_("Rename"))
        self["key_blue"] = Label(_("Content"))
        self["list"] = CCcamConfigList([])
        self["actions"] = ActionMap(["CCcamInfoActions"],
            {
                "ok": self.activate,
                "cancel": self.close,
                "red": self.delete,
                "green": self.activate,
                "yellow": self.rename,
                "blue": self.showContent
            }, -1)
        self.onLayoutFinish.append(self.showConfigs)

    def showConfigs(self):
        """(Re)build the list of CCcam_*.cfg files found in CFG_path."""
        entries = []
        try:
            files = listdir(CFG_path)
        except:
            files = []
        for file in files:
            if file.startswith("CCcam_") and file.endswith(".cfg"):
                entries.append(CCcamConfigListEntry(CFG_path + "/" + file))
        self["list"].setList(entries)

    def delete(self):
        """Ask for confirmation before deleting the selected config file."""
        fileName = self["list"].getCurrent()
        if fileName is not None:
            self.fileToDelete = fileName[0]
            self.session.openWithCallback(self.deleteConfirmed, MessageBox, (_("Delete %s?") % self.fileToDelete))

    def deleteConfirmed(self, yesno):
        if yesno:
            remove(self.fileToDelete)
            if fileExists(self.fileToDelete):
                self.session.open(MessageBox, _("Delete failed!"), MessageBox.TYPE_ERROR)
            else:
                self.session.open(MessageBox, _("Deleted %s!") % self.fileToDelete, MessageBox.TYPE_INFO)
                self.showConfigs()

    def activate(self):
        """Make the selected config the active CCcam.cfg (backing up the old one)."""
        fileName = self["list"].getCurrent()
        if fileName is not None:
            fileName = fileName[0]
            # Delete old backup
            backupFile = "%s.backup" % CFG
            if fileExists(backupFile):
                remove(backupFile)
            # Create a backup of the original /var/etc/CCcam.cfg file
            rename(CFG, backupFile)
            # Now copy the selected cfg file
            system("cp -f %s %s" % (fileName, CFG))
            self.showConfigs()

    def rename(self):
        """Prompt for a new display name for the selected config file."""
        fileName = self["list"].getCurrent()
        if fileName is not None:
            self.fileToRename = fileName[0]
            (name, sel) = getConfigNameAndContent(self.fileToRename)
            self.session.openWithCallback(self.renameCallback, VirtualKeyBoard, title=_("Rename to:"), text=name)

    def renameCallback(self, callback):
        """Write a new "#CONFIGFILE NAME=..." header line into the file.

        Any existing name header is removed first, so the file always
        carries exactly one header line followed by the config body.
        """
        if callback is not None:
            try:
                f = open(self.fileToRename, "r")
                content = f.read()
                f.close()
            except:
                content = None
            if content is not None:
                content = content.replace("\r", "\n")
                if content.startswith("#CONFIGFILE NAME=") and "\n" in content:
                    # BUGFIX: drop the old header line and keep the body.
                    # The previous code used content[:idx+2], which kept only
                    # the old header and discarded the whole config content.
                    idx = content.index("\n")
                    content = content[idx+1:]
                content = "#CONFIGFILE NAME=%s\n%s" % (callback, content)
                try:
                    f = open(self.fileToRename, "w")
                    f.write(content)
                    f.close()
                    self.session.open(MessageBox, _("Renamed %s!") % self.fileToRename, MessageBox.TYPE_INFO)
                    self.showConfigs()
                except:
                    self.session.open(MessageBox, _("Rename failed!"), MessageBox.TYPE_ERROR)
            else:
                self.session.open(MessageBox, _("Rename failed!"), MessageBox.TYPE_ERROR)

    def showContent(self):
        """Display the raw text of the selected config file."""
        fileName = self["list"].getCurrent()
        if fileName is not None:
            try:
                f = open(fileName[0], "r")
                content = f.read()
                f.close()
            except:
                content = _("Could not open the file %s!") % fileName[0]
            self.session.open(CCcamInfoInfoScreen, content, _("CCcam Config Switcher"))
#############################################################
class CCcamInfoMenuConfig(Screen):
    """Screen to blacklist (hide) entries of the main CCcam Info menu."""

    def __init__(self, session):
        Screen.__init__(self, session)
        self.session = session
        Screen.setTitle(self, _("CCcam Info Config"))
        self["key_red"] = Label(_("Cancel"))
        self["key_green"] = Label(_("Save"))
        self["key_yellow"] = Label(_("Location"))
        self["list"] = CCcamConfigList([])
        self.getBlacklistedMenuEntries()
        self["actions"] = ActionMap(["CCcamInfoActions"],
            {
                "ok": self.changeState,
                "cancel": self.close,
                "red": self.close,
                "green": self.save,
                "yellow": self.location
            }, -1)
        self.onLayoutFinish.append(self.showConfigs)

    def getBlacklistedMenuEntries(self):
        # One blacklisted menu title per line; a missing or unreadable file
        # simply means no entries are blacklisted.
        try:
            f = open(config.cccaminfo.blacklist.value, "r")
            content = f.read()
            f.close()
            self.blacklisted = content.split("\n")
        except:
            self.blacklisted = []

    def changeState(self):
        """Toggle the blacklist state of the currently selected menu entry."""
        cur = self["list"].getCurrent()
        if cur is not None:
            cur = cur[0]
            if cur in self.blacklisted:
                # Remove the first matching entry.
                idx = 0
                for x in self.blacklisted:
                    if x == cur:
                        del self.blacklisted[idx]
                        break
                    idx += 1
            else:
                self.blacklisted.append(cur)
        self.showConfigs()

    def showConfigs(self):
        """Rebuild the on-screen list; "Menu config" itself is never listed."""
        list = []
        for x in menu_list:
            if x != _("Menu config"):
                if x in self.blacklisted:
                    list.append(CCcamMenuConfigListEntry(x, True))
                else:
                    list.append(CCcamMenuConfigListEntry(x, False))
        self["list"].setList(list)

    def save(self):
        """Write the blacklist file and report success or failure."""
        content = ""
        for x in self.blacklisted:
            content = content + x + "\n"
        # Collapse empty lines produced by empty blacklist entries.
        content = content.replace("\n\n", "\n")
        try:
            f = open(config.cccaminfo.blacklist.value, "w")
            f.write(content)
            f.close()
            self.session.open(MessageBox, _("Configfile %s saved.") % config.cccaminfo.blacklist.value, MessageBox.TYPE_INFO)
        except:
            self.session.open(MessageBox, _("Could not save configfile %s!") % config.cccaminfo.blacklist.value, MessageBox.TYPE_ERROR)

    def location(self):
        """Let the user pick a new directory for the blacklist file."""
        self.session.openWithCallback(self.locationCallback, LocationBox)

    def locationCallback(self, callback):
        if callback:
            config.cccaminfo.blacklist.value = ("%s/CCcamInfo.blacklisted"%callback).replace("//", "/")
            config.cccaminfo.blacklist.save()
|
hdeeco/stb-gui
|
lib/python/Screens/CCcamInfo.py
|
Python
|
gpl-2.0
| 51,907
|
# -*- coding: utf-8 -*-
"""
Python documentation LaTeX file tokenizer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For more documentation, look into the ``restwriter.py`` file.
:copyright: 2007-2008 by Georg Brandl.
:license: BSD.
"""
import re
from .scanner import Scanner
class Tokenizer(Scanner):
    """ Lex a Python doc LaTeX document. """

    # Single special characters that get their own token type.
    specials = {
        '{': 'bgroup',
        '}': 'egroup',
        '[': 'boptional',
        ']': 'eoptional',
        '~': 'tilde',
        '$': 'mathmode',
    }

    @property
    def mtext(self):
        # Full text of the last successful scan() match.
        return self.match.group()

    def tokenize(self):
        """Return a TokenStream over the whole input."""
        return TokenStream(self._tokenize())

    def _tokenize(self):
        # Generator yielding (lineno, type, value, raw_text) tuples.
        lineno = 1
        while not self.eos:
            if self.scan(r'\\verb([^a-zA-Z])(.*?)(\1)'):
                # specialcase \verb here
                yield lineno, 'command', 'verb', '\\verb'
                yield lineno, 'text', self.match.group(1), self.match.group(1)
                yield lineno, 'text', self.match.group(2), self.match.group(2)
                yield lineno, 'text', self.match.group(3), self.match.group(3)
            elif self.scan(r'\\([a-zA-Z]+\*?)[ \t]*'):
                # \command (optionally starred); trailing blanks are swallowed.
                yield lineno, 'command', self.match.group(1), self.mtext
            elif self.scan(r'\\.'):
                # Escaped single character, e.g. \% or \{.
                yield lineno, 'command', self.mtext[1], self.mtext
            elif self.scan(r'\\\n'):
                yield lineno, 'text', self.mtext, self.mtext
                lineno += 1
            elif self.scan(r'%(.*)\n[ \t]*'):
                yield lineno, 'comment', self.match.group(1), self.mtext
                lineno += 1
            elif self.scan(r'[{}\[\]~$]'):
                yield lineno, self.specials[self.mtext], self.mtext, self.mtext
            elif self.scan(r'(\n[ \t]*){2,}'):
                # Two or more newlines form a paragraph separator.
                lines = self.mtext.count('\n')
                yield lineno, 'parasep', '\n' * lines, self.mtext
                lineno += lines
            elif self.scan(r'\n[ \t]*'):
                # A single newline collapses to one space.
                yield lineno, 'text', ' ', self.mtext
                lineno += 1
            elif self.scan(r'[^\\%}{\[\]~\n]+'):
                yield lineno, 'text', self.mtext, self.mtext
            else:
                raise RuntimeError('unexpected text on line %d: %r' %
                                   (lineno, self.data[self.pos:self.pos+100]))
class TokenStream(object):
    """
    A token stream works like a normal generator just that
    it supports peeking and pushing tokens back to the stream.
    """

    def __init__(self, generator):
        self._generator = generator
        self._pushed = []     # LIFO stack of pushed-back tokens
        self.last = (1, 'initial', '')

    def __iter__(self):
        return self

    def __nonzero__(self):
        """ Are we at the end of the tokenstream? """
        if self._pushed:
            return True
        try:
            self.push(self.next())
        except StopIteration:
            return False
        return True

    # Python 3 spelling of the truth-value hook (Python 2 keeps __nonzero__).
    __bool__ = __nonzero__

    def pop(self):
        """ Return the next token from the stream. """
        if self._pushed:
            rv = self._pushed.pop()
        else:
            # PORTABILITY FIX: generator.next() is Python-2-only; the
            # builtin next() works on both Python 2.6+ and Python 3.
            rv = next(self._generator)
        self.last = rv
        return rv

    next = pop          # Python 2 iterator protocol
    __next__ = pop      # Python 3 iterator protocol

    def popmany(self, num=1):
        """ Pop a list of tokens. """
        return [self.next() for i in range(num)]

    def peek(self):
        """ Pop and push a token, return it. """
        token = self.next()
        self.push(token)
        return token

    def peekmany(self, num=1):
        """ Pop and push a list of tokens.

        NOTE(review): tokens are pushed back in iteration order onto a LIFO
        stack, so they re-pop in reversed order — preserved from the original.
        """
        tokens = self.popmany(num)
        for tok in tokens:
            self.push(tok)
        return tokens

    def push(self, item):
        """ Push a token back to the stream. """
        self._pushed.append(item)
|
creasyw/IMTAphy
|
documentation/doctools/converter/converter/tokenizer.py
|
Python
|
gpl-2.0
| 3,773
|
import plumperfect_test

if __name__ == '__main__':
    # Entry point: build the application via its factory and serve it.
    # NOTE(review): the host/port/debug run() signature looks Flask-style —
    # confirm against plumperfect_test.create_app().
    app = plumperfect_test.create_app()
    app.run(
        host = app.config.get( 'SERVER_HOST' ),
        port = app.config.get( 'SERVER_PORT' ),
        debug = app.config.get( 'DEBUG' )
    )
|
cpcloud/plumperfect.test
|
run.py
|
Python
|
gpl-2.0
| 259
|
#!/usr/bin/python3
'''
Provides IPython console widget.
@author: Eitan Isaacson
@organization: IBM Corporation
@copyright: Copyright (c) 2007 IBM Corporation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
'''
# Taken from [1] (rev 64b6e0c, 2014-03-18) with slight modifications.
# The exact license [2] is reproduced below (3-clause BSD).
#
# [1] https://git.gnome.org/browse/accerciser/tree/plugins/ipython_view.py
# [2] https://git.gnome.org/browse/accerciser/tree/COPYING
# Copyright (c) 2005, 2007 IBM Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the IBM Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import gi
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import GLib
from gi.repository import Pango
from pkg_resources import parse_version
import re
import sys
import os
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from functools import reduce
try:
import IPython
except ImportError:
IPython = None
class IterableIPShell:
    '''
    Create an IPython instance. Does not start a blocking event loop,
    instead allow single iterations. This allows embedding in GTK+
    without blockage.

    @ivar IP: IPython instance.
    @type IP: IPython.iplib.InteractiveShell
    @ivar iter_more: Indicates if the line executed was a complete command,
    or we should wait for more.
    @type iter_more: integer
    @ivar history_level: The place in history where we currently are
    when pressing up/down.
    @type history_level: integer
    @ivar complete_sep: Seperation delimeters for completion function.
    @type complete_sep: _sre.SRE_Pattern
    '''
    def __init__(self, argv=[], user_ns=None, user_global_ns=None,
                 cin=None, cout=None, cerr=None, input_func=None):
        '''
        @param argv: Command line options for IPython
        @type argv: list
        @param user_ns: User namespace.
        @type user_ns: dictionary
        @param user_global_ns: User global namespace.
        @type user_global_ns: dictionary.
        @param cin: Console standard input.
        @type cin: IO stream
        @param cout: Console standard output.
        @type cout: IO stream
        @param cerr: Console standard error.
        @type cerr: IO stream
        @param input_func: Replacement for builtin raw_input()
        @type input_func: function
        '''
        io = IPython.utils.io
        if input_func:
            if parse_version(IPython.release.version) >= parse_version("1.2.1"):
                IPython.terminal.interactiveshell.raw_input_original = input_func
            else:
                IPython.frontend.terminal.interactiveshell.raw_input_original = input_func
        if cin:
            io.stdin = io.IOStream(cin)
        if cout:
            io.stdout = io.IOStream(cout)
        if cerr:
            io.stderr = io.IOStream(cerr)

        # This is to get rid of the blockage that accurs during
        # IPython.Shell.InteractiveShell.user_setup()
        io.raw_input = lambda x: None

        os.environ['TERM'] = 'dumb'
        excepthook = sys.excepthook

        from IPython.config.loader import Config
        cfg = Config()
        cfg.InteractiveShell.colors = "Linux"

        # InteractiveShell's __init__ overwrites io.stdout,io.stderr with
        # sys.stdout, sys.stderr, this makes sure they are right
        #
        old_stdout, old_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = io.stdout.stream, io.stderr.stream

        # InteractiveShell inherits from SingletonConfigurable, so use instance()
        #
        if parse_version(IPython.release.version) >= parse_version("1.2.1"):
            self.IP = IPython.terminal.embed.InteractiveShellEmbed.instance(\
                    config=cfg, user_ns=user_ns)
        else:
            self.IP = IPython.frontend.terminal.embed.InteractiveShellEmbed.instance(\
                    config=cfg, user_ns=user_ns)

        sys.stdout, sys.stderr = old_stdout, old_stderr

        self.IP.system = lambda cmd: self.shell(self.IP.var_expand(cmd),
                                                header='IPython system call: ')
#                                                local_ns=user_ns)
                                                #global_ns=user_global_ns)
                                                #verbose=self.IP.rc.system_verbose)

        self.IP.raw_input = input_func
        sys.excepthook = excepthook
        self.iter_more = 0
        self.history_level = 0
        self.complete_sep = re.compile(r'[\s\{\}\[\]\(\)]')
        self.updateNamespace({'exit':lambda:None})
        self.updateNamespace({'quit':lambda:None})
        if not IPython.__version__.startswith('5.'): # HACK
            self.IP.readline_startup_hook(self.IP.pre_readline)
        # Workaround for updating namespace with sys.modules
        #
        self.__update_namespace()

        # help() is blocking, which hangs GTK+.
        import pydoc
        self.updateNamespace({'help': pydoc.doc})

    def __update_namespace(self):
        '''
        Update self.IP namespace for autocompletion with sys.modules
        '''
        for k, v in list(sys.modules.items()):
            if not '.' in k:
                self.IP.user_ns.update({k:v})

    def execute(self):
        '''
        Executes the current line provided by the shell object.
        '''
        self.history_level = 0

        # this is needed because some functions in IPython use 'print' to print
        # output (like 'who')
        #
        orig_stdout = sys.stdout
        sys.stdout = IPython.utils.io.stdout

        orig_stdin = sys.stdin
        sys.stdin = IPython.utils.io.stdin
        self.prompt = self.generatePrompt(self.iter_more)

        self.IP.hooks.pre_prompt_hook()
        if self.iter_more:
            try:
                self.prompt = self.generatePrompt(True)
            except:
                self.IP.showtraceback()
            if self.IP.autoindent:
                self.IP.rl_do_indent = True

        try:
            line = self.IP.raw_input(self.prompt)
        except KeyboardInterrupt:
            self.IP.write('\nKeyboardInterrupt\n')
            self.IP.input_splitter.reset()
        except:
            self.IP.showtraceback()
        else:
            self.IP.input_splitter.push(line)
            self.iter_more = self.IP.input_splitter.push_accepts_more()
            self.prompt = self.generatePrompt(self.iter_more)
            if (self.IP.SyntaxTB.last_syntax_error and
                    self.IP.autoedit_syntax):
                self.IP.edit_syntax_error()
            if not self.iter_more:
                if parse_version(IPython.release.version) >= parse_version("2.0.0-dev"):
                    source_raw = self.IP.input_splitter.raw_reset()
                else:
                    source_raw = self.IP.input_splitter.source_raw_reset()[1]
                self.IP.run_cell(source_raw, store_history=True)
                self.IP.rl_do_indent = False
            else:
                # TODO: Auto-indent
                #
                self.IP.rl_do_indent = True
                pass

        sys.stdout = orig_stdout
        sys.stdin = orig_stdin

    def generatePrompt(self, is_continuation):
        '''
        Generate prompt depending on is_continuation value

        @param is_continuation
        @type is_continuation: boolean

        @return: The prompt string representation
        @rtype: string
        '''
        # Backwards compatibility with ipyton-0.11
        #
        ver = IPython.__version__
        if ver.startswith('5.'): # HACK
            prompt = '... ' if is_continuation else '>>> '
        elif '0.11' in ver:
            prompt = self.IP.hooks.generate_prompt(is_continuation)
        else:
            if is_continuation:
                prompt = self.IP.prompt_manager.render('in2')
            else:
                prompt = self.IP.prompt_manager.render('in')
        return prompt

    def historyBack(self):
        '''
        Provides one history command back.

        @return: The command string.
        @rtype: string
        '''
        self.history_level -= 1
        if not self._getHistory():
            self.history_level += 1
        return self._getHistory()

    def historyForward(self):
        '''
        Provides one history command forward.

        @return: The command string.
        @rtype: string
        '''
        if self.history_level < 0:
            self.history_level += 1
        return self._getHistory()

    def _getHistory(self):
        '''
        Get's the command string of the current history level.

        @return: Historic command string.
        @rtype: string
        '''
        try:
            rv = self.IP.user_ns['In'][self.history_level].strip('\n')
        except IndexError:
            rv = ''
        return rv

    def updateNamespace(self, ns_dict):
        '''
        Add the current dictionary to the shell namespace.

        @param ns_dict: A dictionary of symbol-values.
        @type ns_dict: dictionary
        '''
        self.IP.user_ns.update(ns_dict)

    def complete(self, line):
        '''
        Returns an auto completed line and/or posibilities for completion.

        @param line: Given line so far.
        @type line: string

        @return: Line completed as for as possible,
        and possible further completions.
        @rtype: tuple
        '''
        split_line = self.complete_sep.split(line)
        if split_line[-1]:
            possibilities = self.IP.complete(split_line[-1])
        else:
            completed = line
            possibilities = ['', []]
        if possibilities:
            def _commonPrefix(str1, str2):
                '''
                Reduction function. returns common prefix of two given strings.

                @param str1: First string.
                @type str1: string
                @param str2: Second string
                @type str2: string

                @return: Common prefix to both strings.
                @rtype: string
                '''
                for i in range(len(str1)):
                    if not str2.startswith(str1[:i+1]):
                        return str1[:i]
                return str1
            if possibilities[1]:
                common_prefix = reduce(_commonPrefix, possibilities[1]) or line[-1]
                completed = line[:-len(split_line[-1])]+common_prefix
            else:
                completed = line
        else:
            completed = line
        return completed, possibilities[1]

    def shell(self, cmd, verbose=0, debug=0, header=''):
        '''
        Replacement method to allow shell commands without them blocking.

        @param cmd: Shell command to execute.
        @type cmd: string
        @param verbose: Verbosity
        @type verbose: integer
        @param debug: Debug level
        @type debug: integer
        @param header: Header to be printed before output
        @type header: string
        '''
        if verbose or debug:
            print((header+cmd))
        # flush stdout so we don't mangle python's buffering
        if not debug:
            # BUGFIX: os.popen4 was removed in Python 3 (this module targets
            # python3, see the shebang); use subprocess instead, merging
            # stderr into stdout just as popen4 did.
            import subprocess
            proc = subprocess.Popen(cmd, shell=True,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            output, _unused = proc.communicate()
            print((output.decode(errors='replace')))
class ConsoleView(gtk.TextView):
    '''
    Specialized text view for console-like workflow.

    @cvar ANSI_COLORS: Mapping of terminal colors to X11 names.
    @type ANSI_COLORS: dictionary
    @ivar text_buffer: Widget's text buffer.
    @type text_buffer: gtk.TextBuffer
    @ivar color_pat: Regex of terminal color pattern
    @type color_pat: _sre.SRE_Pattern
    @ivar mark: Scroll mark for automatic scrolling on input.
    @type mark: gtk.TextMark
    @ivar line_start: Start of command line mark.
    @type line_start: gtk.TextMark
    '''
    ANSI_COLORS = {'0;30': 'Black', '0;31': 'Red',
                   '0;32': 'Green', '0;33': 'Brown',
                   '0;34': 'Blue', '0;35': 'Purple',
                   '0;36': 'Cyan', '0;37': 'LightGray',
                   '1;30': 'DarkGray', '1;31': 'DarkRed',
                   '1;32': 'SeaGreen', '1;33': 'Yellow',
                   '1;34': 'LightBlue', '1;35': 'MediumPurple',
                   '1;36': 'LightCyan', '1;37': 'White'}

    def __init__(self):
        '''
        Initialize console view.
        '''
        gtk.TextView.__init__(self)
        self.modify_font(Pango.FontDescription('Mono'))
        self.set_cursor_visible(True)
        self.text_buffer = self.get_buffer()
        self.mark = self.text_buffer.create_mark('scroll_mark',
                                                 self.text_buffer.get_end_iter(),
                                                 False)
        # One text tag per ANSI color code; weight 700 == bold.
        for code in self.ANSI_COLORS:
            self.text_buffer.create_tag(code,
                                        foreground=self.ANSI_COLORS[code],
                                        weight=700)
        self.text_buffer.create_tag('0')
        # 'notouch' marks read-only regions (previous output and prompts).
        self.text_buffer.create_tag('notouch', editable=False)
        self.color_pat = re.compile('\x01?\x1b\[(.*?)m\x02?')
        self.line_start = \
            self.text_buffer.create_mark('line_start',
                                         self.text_buffer.get_end_iter(), True)
        self.connect('key-press-event', self.onKeyPress)

    def write(self, text, editable=False):
        # Defer the actual write to the GTK main loop (safe from any thread).
        GLib.idle_add(self._write, text, editable)

    def _write(self, text, editable=False):
        '''
        Write given text to buffer.

        @param text: Text to append.
        @type text: string
        @param editable: If true, added text is editable.
        @type editable: boolean
        '''
        # Split the text on ANSI color escapes and insert each segment with
        # the tag named after its color code.
        segments = self.color_pat.split(text)
        segment = segments.pop(0)
        start_mark = self.text_buffer.create_mark(None,
                                                  self.text_buffer.get_end_iter(),
                                                  True)
        self.text_buffer.insert(self.text_buffer.get_end_iter(), segment)

        if segments:
            ansi_tags = self.color_pat.findall(text)
            for tag in ansi_tags:
                i = segments.index(tag)
                self.text_buffer.insert_with_tags_by_name(self.text_buffer.get_end_iter(),
                                                          segments[i+1], tag)
                segments.pop(i)
        if not editable:
            self.text_buffer.apply_tag_by_name('notouch',
                                               self.text_buffer.get_iter_at_mark(start_mark),
                                               self.text_buffer.get_end_iter())
        self.text_buffer.delete_mark(start_mark)
        self.scroll_mark_onscreen(self.mark)

    def showPrompt(self, prompt):
        GLib.idle_add(self._showPrompt, prompt)

    def _showPrompt(self, prompt):
        '''
        Prints prompt at start of line.

        @param prompt: Prompt to print.
        @type prompt: string
        '''
        self._write(prompt)
        self.text_buffer.move_mark(self.line_start,
                                   self.text_buffer.get_end_iter())

    def changeLine(self, text):
        GLib.idle_add(self._changeLine, text)

    def _changeLine(self, text):
        '''
        Replace currently entered command line with given text.

        @param text: Text to use as replacement.
        @type text: string
        '''
        iter = self.text_buffer.get_iter_at_mark(self.line_start)
        iter.forward_to_line_end()
        self.text_buffer.delete(self.text_buffer.get_iter_at_mark(self.line_start), iter)
        self._write(text, True)

    def getCurrentLine(self):
        '''
        Get text in current command line.

        @return: Text of current command line.
        @rtype: string
        '''
        rv = self.text_buffer.get_slice(
            self.text_buffer.get_iter_at_mark(self.line_start),
            self.text_buffer.get_end_iter(), False)
        return rv

    def showReturned(self, text):
        GLib.idle_add(self._showReturned, text)

    def _showReturned(self, text):
        '''
        Show returned text from last command and print new prompt.

        @param text: Text to show.
        @type text: string
        '''
        # NOTE(review): uses self.prompt and self.IP, which are provided by
        # the IPythonView/IterableIPShell subclass, not by this class.
        iter = self.text_buffer.get_iter_at_mark(self.line_start)
        iter.forward_to_line_end()
        self.text_buffer.apply_tag_by_name(
            'notouch',
            self.text_buffer.get_iter_at_mark(self.line_start),
            iter)
        self._write('\n'+text)
        if text:
            self._write('\n')
        self._write('\n') # Add extra line, like normal IPython
        self._showPrompt(self.prompt)
        self.text_buffer.move_mark(self.line_start, self.text_buffer.get_end_iter())
        self.text_buffer.place_cursor(self.text_buffer.get_end_iter())

        if self.IP.rl_do_indent:
            indentation = self.IP.input_splitter.indent_spaces * ' '
            self.text_buffer.insert_at_cursor(indentation)

    def onKeyPress(self, widget, event):
        '''
        Key press callback used for correcting behavior for console-like
        interfaces. For example 'home' should go to prompt, not to begining of
        line.

        @param widget: Widget that key press accored in.
        @type widget: gtk.Widget
        @param event: Event object
        @type event: gtk.gdk.Event

        @return: Return True if event should not trickle.
        @rtype: boolean
        '''
        insert_mark = self.text_buffer.get_insert()
        insert_iter = self.text_buffer.get_iter_at_mark(insert_mark)
        selection_mark = self.text_buffer.get_selection_bound()
        selection_iter = self.text_buffer.get_iter_at_mark(selection_mark)
        start_iter = self.text_buffer.get_iter_at_mark(self.line_start)
        if event.keyval == gdk.KEY_Home:
            if event.state & gdk.ModifierType.CONTROL_MASK or \
               event.state & gdk.ModifierType.MOD1_MASK:
                pass
            elif event.state & gdk.ModifierType.SHIFT_MASK:
                # Shift+Home: extend selection back to the prompt only.
                self.text_buffer.move_mark(insert_mark, start_iter)
                return True
            else:
                # Home: jump to the prompt, not to column 0.
                self.text_buffer.place_cursor(start_iter)
                return True
        elif event.keyval == gdk.KEY_Left:
            insert_iter.backward_cursor_position()
            if not insert_iter.editable(True):
                return True
        elif not event.string:
            pass
        # Keep typed input confined to the editable command-line region.
        elif start_iter.compare(insert_iter) <= 0 and \
             start_iter.compare(selection_iter) <= 0:
            pass
        elif start_iter.compare(insert_iter) > 0 and \
             start_iter.compare(selection_iter) > 0:
            self.text_buffer.place_cursor(start_iter)
        elif insert_iter.compare(selection_iter) < 0:
            self.text_buffer.move_mark(insert_mark, start_iter)
        elif insert_iter.compare(selection_iter) > 0:
            self.text_buffer.move_mark(selection_mark, start_iter)

        return self.onKeyPressExtend(event)

    def onKeyPressExtend(self, event):
        '''
        For some reason we can't extend onKeyPress directly (bug #500900).
        '''
        pass
class IPythonView(ConsoleView, IterableIPShell):
    '''
    Sub-class of both modified IPython shell and L{ConsoleView} this makes
    a GTK+ IPython console.
    '''
    def __init__(self):
        '''
        Initialize. Redirect I/O to console.
        '''
        ConsoleView.__init__(self)
        self.cout = StringIO()
        # NOTE(review): StringIO comes from the module-level
        # cStringIO/StringIO fallback, neither of which exists on Python 3 —
        # io.StringIO would be needed there; confirm the intended runtime.
        IterableIPShell.__init__(self, cout=self.cout, cerr=self.cout,
                                 input_func=self.raw_input)
#        self.connect('key_press_event', self.keyPress)
        self.interrupt = False
        self.execute()
        self.prompt = self.generatePrompt(False)
        self.cout.truncate(0)
        self.showPrompt(self.prompt)

    def raw_input(self, prompt=''):
        '''
        Custom raw_input() replacement. Get's current line from console buffer.

        @param prompt: Prompt to print. Here for compatability as replacement.
        @type prompt: string

        @return: The current command line text.
        @rtype: string
        '''
        if self.interrupt:
            self.interrupt = False
            raise KeyboardInterrupt
        return self.getCurrentLine()

    def onKeyPressExtend(self, event):
        '''
        Key press callback with plenty of shell goodness, like history,
        autocompletions, etc.

        @param widget: Widget that key press occured in.
        @type widget: gtk.Widget
        @param event: Event object.
        @type event: gtk.gdk.Event

        @return: True if event should not trickle.
        @rtype: boolean
        '''
        # Ctrl+C (keyval 99 == 'c'): request interruption of the input loop.
        if event.state & gdk.ModifierType.CONTROL_MASK and event.keyval == 99:
            self.interrupt = True
            self._processLine()
            return True
        elif event.keyval == gdk.KEY_Return:
            self._processLine()
            return True
        elif event.keyval == gdk.KEY_Up:
            self.changeLine(self.historyBack())
            return True
        elif event.keyval == gdk.KEY_Down:
            self.changeLine(self.historyForward())
            return True
        elif event.keyval == gdk.KEY_Tab:
            if not self.getCurrentLine().strip():
                return False
            completed, possibilities = self.complete(self.getCurrentLine())
            if len(possibilities) > 1:
                slice = self.getCurrentLine()
                self.write('\n')
                for symbol in possibilities:
                    self.write(symbol+'\n')
                self.showPrompt(self.prompt)
            # NOTE(review): 'slice' is only bound when multiple completions
            # exist; with one completion and an empty 'completed' this would
            # raise — preserved from the original.
            self.changeLine(completed or slice)
            return True

    def _processLine(self):
        '''
        Process current command line.
        '''
        self.history_pos = 0
        self.execute()
        rv = self.cout.getvalue()
        if rv: rv = rv.strip('\n')
        self.showReturned(rv)
        self.cout.truncate(0)
        self.cout.seek(0)
if __name__ == "__main__":
    # Demo: show a standalone IPython console in a plain GTK window.
    window = gtk.Window()
    window.set_default_size(640, 320)
    window.connect('delete-event', lambda x, y: gtk.main_quit())
    window.add(IPythonView())
    window.show_all()
    gtk.main()
|
strahlc/exaile
|
plugins/ipconsole/ipython_view/ipython_view.py
|
Python
|
gpl-2.0
| 21,886
|
# Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from variable_functions import my_attribute_label
class percent_development_type_DDD_within_walking_distance(Variable):
    """There is exactly one variable corresponding to each defined development type dynamic_land_use_variables,
    where "?" is the development type group's NAME (e.g. residential, commercial).
    100 * [sum over c in cell.walking_radius of (if c.development_type.dynamic_land_use_variables == N then 1 else 0)] /
    (number of cells within walking distance)"""
    _return_type = "float32"

    def __init__(self, type_id):
        # Remember the development type id and derive the name of the
        # per-type count attribute this variable is computed from.
        self.type_id = type_id
        self.number_of_development_type_wwd = (
            "number_of_development_type_%s_within_walking_distance" % self.type_id)
        Variable.__init__(self)

    def dependencies(self):
        # Depends solely on the per-type count within walking distance.
        return [my_attribute_label(self.number_of_development_type_wwd)]

    def compute(self, dataset_pool):
        """Express the per-type count as a percentage of the number of
        cells in the walking-distance footprint."""
        constants = dataset_pool.get_dataset('urbansim_constant')
        footprint_size = float(constants["walking_distance_footprint"].sum())
        counts = self.get_dataset().get_attribute(self.number_of_development_type_wwd)
        return 100.0 * counts / footprint_size
from numpy import array
from numpy import ma
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
class Tests(opus_unittest.OpusTestCase):
    variable_name = "randstad.gridcell.percent_development_type_12_within_walking_distance"

    def test_my_inputs(self):
        """Four gridcells with a 5-cell walking footprint: percentage is count/5 * 100."""
        storage = StorageFactory().get_storage('dict_storage')

        storage.write_table(
            table_name='gridcells',
            table_data={
                'grid_id': array([1, 2, 3, 4]),
                'relative_x': array([1, 2, 1, 2]),
                'relative_y': array([1, 1, 2, 2]),
                'number_of_development_type_12_within_walking_distance': array([3, 5, 1, 0]),
            },
        )
        storage.write_table(
            table_name='urbansim_constants',
            table_data={
                "walking_distance_circle_radius": array([150]),
                'cell_size': array([150]),
                "acres": array([105.0]),
            },
        )

        dataset_pool = DatasetPool(package_order=['urbansim'], storage=storage)
        gridcell = dataset_pool.get_dataset('gridcell')
        gridcell.compute_variables(self.variable_name, dataset_pool=dataset_pool)
        values = gridcell.get_attribute(self.variable_name)

        # Footprint holds 5 cells, so counts [3, 5, 1, 0] map to these percentages.
        expected = array([60.0, 100.0, 20.0, 0.0])
        self.assert_(ma.allclose(values, expected, rtol=1e-7),
                     msg="Error in " + self.variable_name)
# Allow running this module directly as a unit-test script.
if __name__=='__main__':
    opus_unittest.main()
|
christianurich/VIBe2UrbanSim
|
3rdparty/opus/src/randstad/gridcell/percent_development_type_DDD_within_walking_distance.py
|
Python
|
gpl-2.0
| 3,195
|
# #
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Generic EasyBuild support for installing Intel tools, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Ward Poelmans (Ghent University)
@author: Lumir Jasiok (IT4Innovations)
"""
import os
import re
import shutil
import tempfile
import glob
import easybuild.tools.environment as env
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import read_file
from easybuild.tools.run import run_cmd
from vsc.utils import fancylogger
_log = fancylogger.getLogger('generic.intelbase')
# different supported activation types (cfr. Intel documentation)
ACTIVATION_EXIST_LIC = 'exist_lic'  # use a license which exists on the system
ACTIVATION_LIC_FILE = 'license_file'  # use a license file
ACTIVATION_LIC_SERVER = 'license_server'  # use a license server
ACTIVATION_SERIAL = 'serial_number'  # use a serial number
ACTIVATION_TRIAL = 'trial_lic'  # use trial activation
# All supported activation types; bug fix: ACTIVATION_LIC_FILE was missing and
# ACTIVATION_EXIST_LIC was accidentally listed twice.
ACTIVATION_TYPES = [
    ACTIVATION_EXIST_LIC,
    ACTIVATION_LIC_FILE,
    ACTIVATION_LIC_SERVER,
    ACTIVATION_SERIAL,
    ACTIVATION_TRIAL,
]

# silent.cfg parameter name for type of license activation (cfr. options listed above)
ACTIVATION_NAME = 'ACTIVATION_TYPE'  # since icc/ifort v2013_sp1, impi v4.1.1, imkl v11.1
ACTIVATION_NAME_2012 = 'ACTIVATION'  # previous activation type parameter used in older versions
# silent.cfg parameter name for install prefix
INSTALL_DIR_NAME = 'PSET_INSTALL_DIR'
# silent.cfg parameter name for install mode
INSTALL_MODE_NAME = 'PSET_MODE'
# Older (2015 and previous) silent.cfg parameter name for install mode
INSTALL_MODE_NAME_2015 = 'INSTALL_MODE'
# Install mode for 2016 version
INSTALL_MODE = 'install'
# Install mode for 2015 and older versions
INSTALL_MODE_2015 = 'NONRPM'

# silent.cfg parameter name for license file/server specification
LICENSE_FILE_NAME = 'ACTIVATION_LICENSE_FILE'  # since icc/ifort v2013_sp1, impi v4.1.1, imkl v11.1
LICENSE_FILE_NAME_2012 = 'PSET_LICENSE_FILE'  # previous license file parameter used in older versions

COMP_ALL = 'ALL'
COMP_DEFAULTS = 'DEFAULTS'
COMP_DEFAULTS = 'DEFAULTS'
class IntelBase(EasyBlock):
"""
Base class for Intel software
- no configure/make : binary release
- add license_file variable
"""
def __init__(self, *args, **kwargs):
"""Constructor, adds extra config options"""
super(IntelBase, self).__init__(*args, **kwargs)
self.license_file = 'UNKNOWN'
self.license_env_var = 'UNKNOWN'
self.home_subdir = os.path.join(os.getenv('HOME'), 'intel')
common_tmp_dir = os.path.dirname(tempfile.gettempdir()) # common tmp directory, same across nodes
self.home_subdir_local = os.path.join(common_tmp_dir, os.getenv('USER'), 'easybuild_intel')
self.install_components = None
@staticmethod
def extra_options(extra_vars=None):
extra_vars = EasyBlock.extra_options(extra_vars)
extra_vars.update({
'license_activation': [ACTIVATION_LIC_SERVER, "License activation type", CUSTOM],
# 'usetmppath':
# workaround for older SL5 version (5.5 and earlier)
# used to be True, but False since SL5.6/SL6
# disables TMP_PATH env and command line option
'usetmppath': [False, "Use temporary path for installation", CUSTOM],
'm32': [False, "Enable 32-bit toolchain", CUSTOM],
'components': [None, "List of components to install", CUSTOM],
})
return extra_vars
def parse_components_list(self):
"""parse the regex in the components extra_options and select the matching components
from the mediaconfig.xml file in the install dir"""
mediaconfigpath = os.path.join(self.cfg['start_dir'], 'pset', 'mediaconfig.xml')
if not os.path.isfile(mediaconfigpath):
raise EasyBuildError("Could not find %s to find list of components." % mediaconfigpath)
mediaconfig = read_file(mediaconfigpath)
available_components = re.findall("<Abbr>(?P<component>[^<]+)</Abbr>", mediaconfig, re.M)
self.log.debug("Intel components found: %s" % available_components)
self.log.debug("Using regex list: %s" % self.cfg['components'])
if COMP_ALL in self.cfg['components'] or COMP_DEFAULTS in self.cfg['components']:
if len(self.cfg['components']) == 1:
self.install_components = self.cfg['components']
else:
raise EasyBuildError("If you specify %s as components, you cannot specify anything else: %s",
' or '.join([COMP_ALL, COMP_DEFAULTS]), self.cfg['components'])
else:
self.install_components = []
for comp_regex in self.cfg['components']:
comps = [comp for comp in available_components if re.match(comp_regex, comp)]
self.install_components.extend(comps)
self.log.debug("Components to install: %s" % self.install_components)
def clean_home_subdir(self):
"""Remove contents of (local) 'intel' directory home subdir, where stuff is cached."""
self.log.debug("Cleaning up %s..." % self.home_subdir_local)
try:
for tree in os.listdir(self.home_subdir_local):
self.log.debug("... removing %s subtree" % tree)
path = os.path.join(self.home_subdir_local, tree)
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
except OSError, err:
raise EasyBuildError("Cleaning up intel dir %s failed: %s", self.home_subdir_local, err)
def setup_local_home_subdir(self):
"""
Intel script use $HOME/intel to cache stuff.
To enable parallel builds, we symlink $HOME/intel to a temporary dir on the local disk."""
try:
# make sure local directory exists
if not os.path.exists(self.home_subdir_local):
os.makedirs(self.home_subdir_local)
self.log.debug("Created local dir %s" % self.home_subdir_local)
if os.path.exists(self.home_subdir):
# if 'intel' dir in $HOME already exists, make sure it's the right symlink
symlink_ok = os.path.islink(self.home_subdir) and os.path.samefile(self.home_subdir,
self.home_subdir_local)
if not symlink_ok:
# rename current 'intel' dir
home_intel_bk = tempfile.mkdtemp(dir=os.path.dirname(self.home_subdir),
prefix='%s.bk.' % os.path.basename(self.home_subdir))
self.log.info("Moving %(ih)s to %(ihl)s, I need %(ih)s myself..." % {'ih': self.home_subdir,
'ihl': home_intel_bk})
shutil.move(self.home_subdir, home_intel_bk)
# set symlink in place
os.symlink(self.home_subdir_local, self.home_subdir)
self.log.debug("Created symlink (1) %s to %s" % (self.home_subdir, self.home_subdir_local))
else:
# if a broken symlink is present, remove it first
if os.path.islink(self.home_subdir):
os.remove(self.home_subdir)
os.symlink(self.home_subdir_local, self.home_subdir)
self.log.debug("Created symlink (2) %s to %s" % (self.home_subdir, self.home_subdir_local))
except OSError, err:
raise EasyBuildError("Failed to symlink %s to %s: %s", self.home_subdir_local, self.home_subdir, err)
def configure_step(self):
"""Configure: handle license file and clean home dir."""
# prepare (local) 'intel' home subdir
self.setup_local_home_subdir()
self.clean_home_subdir()
lic_env_var = None # environment variable that will be used
default_lic_env_var = 'INTEL_LICENSE_FILE'
lic_env_vars = [default_lic_env_var, 'LM_LICENSE_FILE']
env_var_names = ', '.join(['$%s' % x for x in lic_env_vars])
lic_env_var_vals = [(var, os.getenv(var)) for var in lic_env_vars]
license_specs = [(var, e) for (var, val) in lic_env_var_vals if val is not None for e in val.split(os.pathsep)]
if not license_specs:
self.log.debug("No env var from %s set, trying 'license_file' easyconfig parameter..." % lic_env_vars)
# obtain license path
self.license_file = self.cfg['license_file']
if self.license_file:
self.log.info("Using license file %s" % self.license_file)
else:
raise EasyBuildError("No license file defined, maybe set one these env vars: %s", env_var_names)
# verify license path
if not os.path.exists(self.license_file):
raise EasyBuildError("%s not found; correct 'license_file', or define one of the these env vars: %s",
self.license_file, env_var_names)
# set default environment variable for license specification
env.setvar(default_lic_env_var, self.license_file)
self.license_env_var = default_lic_env_var
else:
valid_license_specs = {}
# iterate through entries in environment variables until a valid license specification is found
# valid options are:
# * an (existing) license file
# * a directory containing atleast one file named *.lic (only one is used, first listed alphabetically)
# * a license server, format: <port>@<server>
server_port_regex = re.compile('^[0-9]+@\S+$')
for (lic_env_var, license_spec) in license_specs:
# a value that seems to match a license server specification
if server_port_regex.match(license_spec):
self.log.info("Found license server spec %s in $%s, retaining it" % (license_spec, lic_env_var))
valid_license_specs.setdefault(lic_env_var, set()).add(license_spec)
# an (existing) license file
elif os.path.isfile(license_spec):
self.log.info("Found existing license file %s via $%s, retaining it" % (license_spec, lic_env_var))
valid_license_specs.setdefault(lic_env_var, set()).add(license_spec)
# a directory, should contain at least one *.lic file (use only the first one)
elif os.path.isdir(license_spec):
lic_files = glob.glob("%s/*.lic" % license_spec)
if not lic_files:
self.log.debug("Found no license files (*.lic) in %s" % license_spec)
continue
# just pick the first .lic, if it's not correct, $INTEL_LICENSE_FILE should be adjusted instead
valid_license_specs.setdefault(lic_env_var, set()).add(lic_files[0])
self.log.info('Picked the first *.lic file from $%s: %s' % (lic_env_var, lic_files[0]))
if not valid_license_specs:
raise EasyBuildError("Cannot find a valid license specification in %s", license_specs)
# only retain one environment variable (by order of preference), retain all valid matches for that env var
for lic_env_var in lic_env_vars:
if lic_env_var in valid_license_specs:
self.license_env_var = lic_env_var
retained = valid_license_specs[self.license_env_var]
self.license_file = os.pathsep.join(retained)
# if we have multiple retained lic specs, specify to 'use a license which exists on the system'
if len(retained) > 1:
self.cfg['license_activation'] = ACTIVATION_EXIST_LIC
# $INTEL_LICENSE_FILE should always be set during installation with existing license
env.setvar(default_lic_env_var, self.license_file)
break
if self.license_file is None or self.license_env_var is None:
raise EasyBuildError("self.license_file or self.license_env_var still None, "
"something went horribly wrong...")
self.cfg['license_file'] = self.license_file
env.setvar(self.license_env_var, self.license_file)
self.log.info("Using Intel license specifications from $%s: %s", self.license_env_var, self.license_file)
# clean home directory
self.clean_home_subdir()
# determine list of components, based on 'components' easyconfig parameter (if specified)
if self.cfg['components']:
self.parse_components_list()
else:
self.log.debug("No components specified")
def build_step(self):
"""Binary installation files, so no building."""
pass
def install_step(self, silent_cfg_names_map=None, silent_cfg_extras=None):
"""Actual installation
- create silent cfg file
- set environment parameters
- execute command
"""
if silent_cfg_names_map is None:
silent_cfg_names_map = {}
# license file entry is only applicable with license file or server type of activation
# also check whether specified activation type makes sense
lic_activation = self.cfg['license_activation']
lic_file_server_activations = [ACTIVATION_LIC_FILE, ACTIVATION_LIC_SERVER]
other_activations = [act for act in ACTIVATION_TYPES if act not in lic_file_server_activations]
lic_file_entry = ""
if lic_activation in lic_file_server_activations:
lic_file_entry = "%(license_file_name)s=%(license_file)s"
elif not self.cfg['license_activation'] in other_activations:
raise EasyBuildError("Unknown type of activation specified: %s (known :%s)",
lic_activation, ACTIVATION_TYPES)
silent = '\n'.join([
"%(activation_name)s=%(activation)s",
lic_file_entry,
"%(install_dir_name)s=%(install_dir)s",
"ACCEPT_EULA=accept",
"%(install_mode_name)s=%(install_mode)s",
"CONTINUE_WITH_OPTIONAL_ERROR=yes",
"" # Add a newline at the end, so we can easily append if needed
]) % {
'activation_name': silent_cfg_names_map.get('activation_name', ACTIVATION_NAME),
'license_file_name': silent_cfg_names_map.get('license_file_name', LICENSE_FILE_NAME),
'install_dir_name': silent_cfg_names_map.get('install_dir_name', INSTALL_DIR_NAME),
'activation': self.cfg['license_activation'],
'license_file': self.license_file,
'install_dir': silent_cfg_names_map.get('install_dir', self.installdir),
'install_mode': silent_cfg_names_map.get('install_mode', INSTALL_MODE_2015),
'install_mode_name': silent_cfg_names_map.get('install_mode_name', INSTALL_MODE_NAME_2015),
}
if self.install_components is not None:
if len(self.install_components) == 1 and self.install_components[0] in [COMP_ALL, COMP_DEFAULTS]:
# no quotes should be used for ALL or DEFAULTS
silent += 'COMPONENTS=%s\n' % self.install_components[0]
elif self.install_components:
# a list of components is specified (needs quotes)
silent += 'COMPONENTS="' + ';'.join(self.install_components) + '"\n'
else:
raise EasyBuildError("Empty list of matching components obtained via %s", self.cfg['components'])
if silent_cfg_extras is not None:
if isinstance(silent_cfg_extras, dict):
silent += '\n'.join("%s=%s" % (key, value) for (key, value) in silent_cfg_extras.iteritems())
else:
raise EasyBuildError("silent_cfg_extras needs to be a dict")
# we should be already in the correct directory
silentcfg = os.path.join(os.getcwd(), "silent.cfg")
try:
f = open(silentcfg, 'w')
f.write(silent)
f.close()
except:
raise EasyBuildError("Writing silent cfg, failed", silent)
self.log.debug("Contents of %s:\n%s" % (silentcfg, silent))
# workaround for mktmp: create tmp dir and use it
tmpdir = os.path.join(self.cfg['start_dir'], 'mytmpdir')
try:
os.makedirs(tmpdir)
except:
raise EasyBuildError("Directory %s can't be created", tmpdir)
tmppathopt = ''
if self.cfg['usetmppath']:
env.setvar('TMP_PATH', tmpdir)
tmppathopt = "-t %s" % tmpdir
# set some extra env variables
env.setvar('LOCAL_INSTALL_VERBOSE', '1')
env.setvar('VERBOSE_MODE', '1')
env.setvar('INSTALL_PATH', self.installdir)
# perform installation
cmd = "./install.sh %s -s %s" % (tmppathopt, silentcfg)
return run_cmd(cmd, log_all=True, simple=True)
def move_after_install(self):
"""Move installed files to correct location after installation."""
subdir = os.path.join(self.installdir, self.name, self.version)
self.log.debug("Moving contents of %s to %s" % (subdir, self.installdir))
try:
# remove senseless symlinks, e.g. impi_5.0.1 and impi_latest
majver = '.'.join(self.version.split('.')[:-1])
for symlink in ['%s_%s' % (self.name, majver), '%s_latest' % self.name]:
symlink_fp = os.path.join(self.installdir, symlink)
if os.path.exists(symlink_fp):
os.remove(symlink_fp)
# move contents of 'impi/<version>' dir to installdir
for fil in os.listdir(subdir):
source = os.path.join(subdir, fil)
target = os.path.join(self.installdir, fil)
self.log.debug("Moving %s to %s" % (source, target))
shutil.move(source, target)
shutil.rmtree(os.path.join(self.installdir, self.name))
except OSError, err:
raise EasyBuildError("Failed to move contents of %s to %s: %s", subdir, self.installdir, err)
def make_module_extra(self):
"""Custom variable definitions in module file."""
txt = super(IntelBase, self).make_module_extra()
txt += self.module_generator.prepend_paths(self.license_env_var, [self.license_file],
allow_abs=True, expand_relpaths=False)
if self.cfg['m32']:
nlspath = os.path.join('idb', '32', 'locale', '%l_%t', '%N')
else:
nlspath = os.path.join('idb', 'intel64', 'locale', '%l_%t', '%N')
txt += self.module_generator.prepend_paths('NLSPATH', nlspath)
return txt
def cleanup_step(self):
"""Cleanup leftover mess
- clean home dir
- generic cleanup (get rid of build dir)
"""
self.clean_home_subdir()
super(IntelBase, self).cleanup_step()
# no default sanity check, needs to be implemented by derived class
|
valtandor/easybuild-easyblocks
|
easybuild/easyblocks/generic/intelbase.py
|
Python
|
gpl-2.0
| 20,761
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
def main():
from opal.applications.CGI import CGI
class HelloApp(CGI):
class Inventory(CGI.Inventory):
import pyre.inventory
name = pyre.inventory.str("name", default="world")
name.meta['tip'] = "the target of the greeting"
def main(self):
import os
pid = os.getpid()
euid = os.geteuid()
uid = os.getuid()
print '<pre>'
print "(%d, %s): Hello %s!" % (pid, uid, self.inventory.name)
print '</pre>'
return
def __init__(self):
CGI.__init__(self, 'hello')
return
import journal
journal.info('opal.cmdline').activate()
journal.debug('opal.commandline').activate()
app = HelloApp()
return app.run()
# main
if __name__ == '__main__':
    # invoke the application shell
    main()

# CVS revision marker
__id__ = "$Id: hello.py,v 1.1.1.1 2005/03/15 06:09:10 aivazis Exp $"

# End of file
|
bmi-forum/bmi-pyre
|
pythia-0.8/packages/opal/tests/hello.py
|
Python
|
gpl-2.0
| 1,368
|
import numpy as np
import tensorflow as tf
from swl.machine_learning.tensorflow_model import SimpleAuxiliaryInputTensorFlowModel
#--------------------------------------------------------------------
class SimpleSeq2SeqEncoderDecoder(SimpleAuxiliaryInputTensorFlowModel):
def __init__(self, encoder_input_shape, decoder_input_shape, decoder_output_shape, start_token, end_token, is_bidirectional=True, is_time_major=False):
    """Create sequence-length/batch-size placeholders and store the decoding options."""
    # Per-sample lengths for encoder inputs and decoder outputs (1-D, batch-sized).
    self._input_seq_lens_ph = tf.placeholder(tf.int32, [None], name='encoder_input_seq_lens_ph')
    self._output_seq_lens_ph = tf.placeholder(tf.int32, [None], name='decoder_output_seq_lens_ph')
    self._batch_size_ph = tf.placeholder(tf.int32, [1], name='batch_size_ph')

    # Special tokens and layout flags used when building the decoder graph.
    self._start_token = start_token
    self._end_token = end_token
    self._is_bidirectional = is_bidirectional
    self._is_time_major = is_time_major

    super().__init__(encoder_input_shape, decoder_input_shape, decoder_output_shape)
def get_feed_dict(self, data, num_data, *args, **kwargs):
    """Build a feed dict from (encoder inputs,) or (encoder, decoder inputs, decoder outputs).

    Sequence lengths are filled in assuming every sample spans the full number
    of time steps of the corresponding array.
    """
    # Which axes hold the batch and the time steps depends on time-major layout.
    batch_axis, time_axis = (1, 0) if self._is_time_major else (0, 1)

    def full_lengths(arr):
        # One length entry per sample, all equal to the array's time dimension.
        return np.full(arr.shape[batch_axis], arr.shape[time_axis], np.int32)

    if 1 == len(data):
        encoder_inputs = data[0]
        feed_dict = {
            self._input_ph: encoder_inputs,
            self._input_seq_lens_ph: full_lengths(encoder_inputs),
            self._output_seq_lens_ph: full_lengths(encoder_inputs),
            self._batch_size_ph: [num_data],
        }
    elif 3 == len(data):
        encoder_inputs, decoder_inputs, decoder_outputs = data
        feed_dict = {
            self._input_ph: encoder_inputs,
            self._input_seq_lens_ph: full_lengths(encoder_inputs),
            self._batch_size_ph: [num_data],
        }
        if decoder_inputs is None or decoder_outputs is None:
            # Inference: output lengths fall back to the encoder input lengths.
            feed_dict[self._output_seq_lens_ph] = full_lengths(encoder_inputs)
        else:
            # Training: also feed the decoder inputs/targets.
            feed_dict[self._output_seq_lens_ph] = full_lengths(decoder_outputs)
            feed_dict[self._aux_input_ph] = decoder_inputs
            feed_dict[self._output_ph] = decoder_outputs
    else:
        raise ValueError('Invalid number of feed data: {}'.format(len(data)))
    return feed_dict
def _get_loss(self, y, t):
    """Masked sequence cross-entropy between logits ``y`` and one-hot targets ``t``.

    Steps beyond each sample's output sequence length are excluded via a
    weight mask, so padding does not contribute to the loss.
    """
    with tf.name_scope('loss'):
        """
        if 1 == num_classes:
            loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=t, logits=y))
        elif num_classes >= 2:
            #loss = tf.reduce_mean(-tf.reduce_sum(t * tf.log(y), reduction_indices=[1]))
            #loss = tf.reduce_mean(-tf.reduce_sum(t * tf.log(tf.clip_by_value(y, 1e-10, 1.0)), reduction_indices=[1]))
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=t, logits=y))
        else:
            assert num_classes > 0, 'Invalid number of classes.'
        """
        # 1.0 for valid time steps, 0.0 for padding (per sample).
        masks = tf.sequence_mask(self._output_seq_lens_ph, maxlen=tf.reduce_max(self._output_seq_lens_ph), dtype=tf.float32)
        # Weighted cross-entropy loss for a sequence of logits.
        #loss = tf.contrib.seq2seq.sequence_loss(logits=y, targets=t, weights=masks)
        # Targets are converted from one-hot to class indices via argmax.
        loss = tf.contrib.seq2seq.sequence_loss(logits=y, targets=tf.argmax(t, axis=-1), weights=masks)
        tf.summary.scalar('loss', loss)
        return loss
def _create_single_model(self, encoder_inputs, decoder_inputs, encoder_input_shape, decoder_input_shape, decoder_output_shape, is_training):
    """Build the encoder-decoder graph, dispatching on the bidirectional flag."""
    with tf.variable_scope('simple_seq2seq_encdec', reuse=tf.AUTO_REUSE):
        # TODO [improve] >> It is not good to use num_time_steps.
        time_axis = 0 if self._is_time_major else 1
        num_time_steps = decoder_output_shape[time_axis]
        num_classes = decoder_output_shape[-1]
        if self._is_bidirectional:
            builder = self._create_dynamic_bidirectional_model
        else:
            builder = self._create_dynamic_model
        return builder(encoder_inputs, decoder_inputs, is_training, self._input_seq_lens_ph, self._batch_size_ph, num_time_steps, num_classes, self._is_time_major)
def _create_dynamic_model(self, encoder_inputs, decoder_inputs, is_training, encoder_input_seq_lens, batch_size, num_time_steps, num_classes, is_time_major):
    """Build a unidirectional encoder + step-by-step decoder graph.

    The encoder is a dynamic RNN; the decoder runs teacher-forced during
    training and feeds back its own predictions during inference.
    """
    num_enc_hidden_units = 128
    num_dec_hidden_units = 128
    keep_prob = 1.0
    """
    num_enc_hidden_units = 256
    num_dec_hidden_units = 256
    keep_prob = 0.5
    """

    # Defines cells.
    enc_cell = self._create_unit_cell(num_enc_hidden_units, 'enc_unit_cell')
    enc_cell = tf.contrib.rnn.DropoutWrapper(enc_cell, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
    # REF [paper] >> "Long Short-Term Memory-Networks for Machine Reading", arXiv 2016.
    #enc_cell = tf.contrib.rnn.AttentionCellWrapper(enc_cell, attention_window_len, state_is_tuple=True)
    dec_cell = self._create_unit_cell(num_dec_hidden_units, 'dec_unit_cell')
    dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
    # REF [paper] >> "Long Short-Term Memory-Networks for Machine Reading", arXiv 2016.
    #dec_cell = tf.contrib.rnn.AttentionCellWrapper(dec_cell, attention_window_len, state_is_tuple=True)

    # Encoder. The final encoder state seeds the decoder below.
    enc_cell_outputs, enc_cell_state = tf.nn.dynamic_rnn(enc_cell, encoder_inputs, sequence_length=encoder_input_seq_lens, time_major=is_time_major, dtype=tf.float32, scope='enc')

    # Attention.
    # REF [function] >> SimpleSeq2SeqEncoderDecoderWithTfAttention._create_dynamic_model() in ./simple_seq2seq_encdec_tf_attention.py.

    # FIXME [implement] >> How to add dropout?
    #with tf.variable_scope('simple_seq2seq_encdec', reuse=tf.AUTO_REUSE):
    #	dropout_rate = 1 - keep_prob
    #	# NOTE [info] >> If dropout_rate=0.0, dropout layer is not created.
    #	cell_outputs = tf.layers.dropout(cell_outputs, rate=dropout_rate, training=is_training, name='dropout')

    """
    # REF [site] >> https://www.tensorflow.org/api_docs/python/tf/nn/static_rnn
    # Method #1: Uses only inputs of the cell.
    cell_state = cell.zero_state(batch_size, tf.float32)  # Initial state.
    cell_outputs = []
    for inp in cell_inputs:
        cell_output, cell_state = cell(inp, cell_state, scope='cell')
        cell_outputs.append(cell_output)
    # Method #2: Uses only the previous output of the cell.
    cell_state = cell.zero_state(batch_size, tf.float32)  # Initial state.
    cell_input = tf.fill(tf.concat((batch_size, tf.constant([num_classes])), axis=-1), float(start_token))  # Initial input.
    cell_outputs = []
    for _ in range(num_time_steps):
        cell_output, cell_state = cell(cell_input, cell_state, scope='cell')
        cell_input = f(cell_output)  # TODO [implement] >> e.g.) num_dec_hidden_units -> num_classes.
        cell_outputs.append(cell_input)
        #cell_outputs.append(cell_output)
    # Method #3: Uses both inputs and the previous output of the cell.
    cell_state = cell.zero_state(batch_size, tf.float32)  # Initial state.
    cell_input = tf.fill(tf.concat((batch_size, tf.constant([num_classes])), axis=-1), float(start_token))  # Initial input.
    cell_outputs = []
    for inp in cell_inputs:
        cell_output, cell_state = cell(tf.concat([inp, cell_input], axis=-1), cell_state, scope='cell')
        #cell_output, cell_state = cell(cell_input, tf.concat([inp, cell_state], axis=-1), scope='cell')
        cell_input = f(cell_output)  # TODO [implement] >> e.g.) num_dec_hidden_units -> num_classes.
        cell_outputs.append(cell_input)
        #cell_outputs.append(cell_output)
    """

    # Decoder.
    # NOTICE [info] {important} >> The same model has to be used in training and inference steps.
    if is_training:
        return self._get_decoder_output_for_training(dec_cell, enc_cell_state, decoder_inputs, num_time_steps, num_classes, is_time_major)
    else:
        return self._get_decoder_output_for_inference(dec_cell, enc_cell_state, batch_size, num_time_steps, num_classes, is_time_major)
def _create_dynamic_bidirectional_model(self, encoder_inputs, decoder_inputs, is_training, encoder_input_seq_lens, batch_size, num_time_steps, num_classes, is_time_major):
    """Build a bidirectional encoder + step-by-step decoder graph.

    Forward and backward encoder states are concatenated, so the decoder's
    hidden size (128) is twice each encoder direction's size (64).
    """
    num_enc_hidden_units = 64
    num_dec_hidden_units = 128
    keep_prob = 1.0
    """
    num_enc_hidden_units = 128
    num_dec_hidden_units = 256
    keep_prob = 0.5
    """

    # Defines cells.
    enc_cell_fw = self._create_unit_cell(num_enc_hidden_units, 'enc_fw_unit_cell')  # Forward cell.
    enc_cell_fw = tf.contrib.rnn.DropoutWrapper(enc_cell_fw, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
    # REF [paper] >> "Long Short-Term Memory-Networks for Machine Reading", arXiv 2016.
    #enc_cell_fw = tf.contrib.rnn.AttentionCellWrapper(enc_cell_fw, attention_window_len, state_is_tuple=True)
    enc_cell_bw = self._create_unit_cell(num_enc_hidden_units, 'enc_bw_unit_cell')  # Backward cell.
    enc_cell_bw = tf.contrib.rnn.DropoutWrapper(enc_cell_bw, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
    # REF [paper] >> "Long Short-Term Memory-Networks for Machine Reading", arXiv 2016.
    #enc_cell_bw = tf.contrib.rnn.AttentionCellWrapper(enc_cell_bw, attention_window_len, state_is_tuple=True)
    dec_cell = self._create_unit_cell(num_dec_hidden_units, 'dec_unit_cell')
    dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, input_keep_prob=keep_prob, output_keep_prob=1.0, state_keep_prob=keep_prob)
    # REF [paper] >> "Long Short-Term Memory-Networks for Machine Reading", arXiv 2016.
    #dec_cell = tf.contrib.rnn.AttentionCellWrapper(dec_cell, attention_window_len, state_is_tuple=True)

    # Encoder.
    enc_cell_outputs, enc_cell_states = tf.nn.bidirectional_dynamic_rnn(enc_cell_fw, enc_cell_bw, encoder_inputs, sequence_length=encoder_input_seq_lens, time_major=is_time_major, dtype=tf.float32, scope='enc')
    # Merge the two directions: concatenate outputs along features, and the
    # LSTM (c, h) state components along their last axis.
    enc_cell_outputs = tf.concat(enc_cell_outputs, axis=-1)
    enc_cell_states = tf.contrib.rnn.LSTMStateTuple(tf.concat((enc_cell_states[0].c, enc_cell_states[1].c), axis=-1), tf.concat((enc_cell_states[0].h, enc_cell_states[1].h), axis=-1))

    # Attention.
    # REF [function] >> SimpleSeq2SeqEncoderDecoderWithTfAttention._create_dynamic_bidirectional_model() in ./simple_seq2seq_encdec_tf_attention.py.

    # FIXME [implement] >> How to add dropout?
    #with tf.variable_scope('simple_seq2seq_encdec', reuse=tf.AUTO_REUSE):
    #	dropout_rate = 1 - keep_prob
    #	# NOTE [info] >> If dropout_rate=0.0, dropout layer is not created.
    #	cell_outputs = tf.layers.dropout(cell_outputs, rate=dropout_rate, training=is_training, name='dropout')

    # Decoder.
    # NOTICE [info] {important} >> The same model has to be used in training and inference steps.
    if is_training:
        return self._get_decoder_output_for_training(dec_cell, enc_cell_states, decoder_inputs, num_time_steps, num_classes, is_time_major)
    else:
        return self._get_decoder_output_for_inference(dec_cell, enc_cell_states, batch_size, num_time_steps, num_classes, is_time_major)
def _create_projection_layer(self, dec_cell_outputs, num_classes):
    """Project decoder outputs to class scores: sigmoid for 1 class, softmax for >= 2."""
    with tf.variable_scope('projection', reuse=tf.AUTO_REUSE):
        if num_classes == 1:
            # Binary output: single sigmoid unit.
            return tf.layers.dense(dec_cell_outputs, 1, activation=tf.sigmoid, name='dense')
            #return tf.layers.dense(dec_cell_outputs, 1, activation=tf.sigmoid, activity_regularizer=tf.contrib.layers.l2_regularizer(0.0001), name='dense')
        if num_classes >= 2:
            # Multi-class output: softmax over classes.
            return tf.layers.dense(dec_cell_outputs, num_classes, activation=tf.nn.softmax, name='dense')
            #return tf.layers.dense(dec_cell_outputs, num_classes, activation=tf.nn.softmax, activity_regularizer=tf.contrib.layers.l2_regularizer(0.0001), name='dense')
        assert num_classes > 0, 'Invalid number of classes.'
        return None
def _get_decoder_output_for_training(self, dec_cell, initial_cell_state, decoder_inputs, num_time_steps, num_classes, is_time_major):
    """Teacher-forced decoding: step the decoder cell over the ground-truth
    decoder inputs and project the collected outputs to class scores.

    The cell is stepped manually (rather than via tf.nn.dynamic_rnn) so the
    same 'dec' scope/cell call is reused by the inference-time loop.
    dec_cell_state is an instance of LSTMStateTuple, which stores (c, h),
    where c is the hidden state and h is the output.
    """
    #dec_cell_outputs, dec_cell_state = tf.nn.dynamic_rnn(dec_cell, decoder_inputs, initial_state=enc_cell_states, time_major=is_time_major, dtype=tf.float32, scope='dec')
    #dec_cell_outputs, _ = tf.nn.dynamic_rnn(dec_cell, decoder_inputs, initial_state=enc_cell_states, time_major=is_time_major, dtype=tf.float32, scope='dec')

    # Unstack: a tensor of shape (samples, time-steps, features) -> a list of 'time-steps' tensors of shape (samples, features).
    decoder_inputs = tf.unstack(decoder_inputs, num_time_steps, axis=0 if is_time_major else 1)

    dec_cell_state = initial_cell_state
    dec_cell_outputs = []
    for inp in decoder_inputs:
        # One RNN step per ground-truth input; state threads through the loop.
        dec_cell_output, dec_cell_state = dec_cell(inp, dec_cell_state, scope='dec')
        dec_cell_outputs.append(dec_cell_output)

    # Stack: a list of 'time-steps' tensors of shape (samples, features) -> a tensor of shape (samples, time-steps, features).
    dec_cell_outputs = tf.stack(dec_cell_outputs, axis=0 if is_time_major else 1)

    return self._create_projection_layer(dec_cell_outputs, num_classes)
def _get_decoder_output_for_inference(self, dec_cell, initial_cell_state, batch_size, num_time_steps, num_classes, is_time_major):
    """Free-running decoding: each step's projected output is fed back as the
    next step's input, starting from a start-token-filled tensor.

    Returns the stacked per-step projection outputs.
    """
    dec_cell_state = initial_cell_state
    # Initial input: shape (batch, num_classes) filled with the start token.
    dec_cell_input = tf.fill(tf.concat((batch_size, tf.constant([num_classes])), axis=-1), float(self._start_token))  # Initial input.
    projection_outputs = []
    for _ in range(num_time_steps):
        dec_cell_output, dec_cell_state = dec_cell(dec_cell_input, dec_cell_state, scope='dec')
        #dec_cell_output = tf.reshape(dec_cell_output, [None, 1, num_dec_hidden_units])
        # The projection output doubles as the next decoder input.
        dec_cell_input = self._create_projection_layer(dec_cell_output, num_classes)
        projection_outputs.append(dec_cell_input)
    # Stack: a list of 'time-steps' tensors of shape (samples, features) -> a tensor of shape (samples, time-steps, features).
    return tf.stack(projection_outputs, axis=0 if is_time_major else 1)
def _create_unit_cell(self, num_units, name):
    """Construct a single recurrent unit: an LSTM cell with forget bias 1.0.

    (Alternative cells such as RNNCell or GRUCell could be substituted here.)
    """
    return tf.nn.rnn_cell.LSTMCell(num_units, forget_bias=1.0, name=name)
|
sangwook236/sangwook-library
|
python/test/machine_learning/simple_seq2seq_encdec.py
|
Python
|
gpl-2.0
| 14,909
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from pyre.applications.Script import Script
class PickleScript(Script):
    """Pyre application that builds a geometry model and serializes it to a PML file."""

    class Inventory(Script.Inventory):

        import pyre.geometry
        import pyre.inventory

        # Output file name for the serialized model.
        name = pyre.inventory.str("name", default="geometry-test.pml")
        # Facility that constructs the geometry model.
        modeller = pyre.inventory.facility("modeller", default="canister")

    def main(self, *args, **kwds):
        """Build the model and save it to the configured output file."""
        model = self.modeller.model()
        # Fix: the original used the deprecated Python 2 file() builtin and
        # never closed the handle; open() + try/finally guarantees the flush.
        outfile = open(self.inventory.name, "w")
        try:
            self.modeller.saveModel(model, outfile)
        finally:
            outfile.close()
        return

    def __init__(self):
        Script.__init__(self, "pickle")
        self.modeller = None
        return

    def _configure(self):
        """Bind the configured modeller facility after inventory resolution."""
        Script._configure(self)
        self.modeller = self.inventory.modeller
        return
# main
# Script entry point: construct the app and hand control to pyre's driver.
if __name__ == "__main__":
    app = PickleScript()
    app.run()
# version
__id__ = "$Id: pickle.py,v 1.1.1.1 2005/03/08 16:13:50 aivazis Exp $"
#
# End of file
|
bmi-forum/bmi-pyre
|
pythia-0.8/packages/pyre/tests/geometry/pickle.py
|
Python
|
gpl-2.0
| 1,303
|
# Copyright (c) 2008-2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
"""
Shared code for builder and tagger class
"""
import os
from tito.common import find_git_root
class ConfigObject(object):
    """
    Parent class for Builder and Tagger with shared code
    """

    def __init__(self, pkg_config=None, global_config=None):
        """
        pkg_config - Package specific configuration.
        global_config - Global configuration from rel-eng/tito.props.
        """
        self.config = global_config

        # Override global configurations using local configurations.
        for section in pkg_config.sections():
            # Hoisted out of the per-option loop: a section only needs to be
            # created once, not re-checked for every option it contains.
            if not self.config.has_section(section):
                self.config.add_section(section)
            for options in pkg_config.options(section):
                self.config.set(section, options,
                        pkg_config.get(section, options))

        self.git_root = find_git_root()
        self.rel_eng_dir = os.path.join(self.git_root, "rel-eng")
|
domcleal/tito
|
src/tito/config_object.py
|
Python
|
gpl-2.0
| 1,559
|
# Module author attribute (name kept verbatim, in Chinese).
__author__ = '奇炜'
|
iamxi/jzspyw.com
|
lib/tool/__init__.py
|
Python
|
gpl-2.0
| 22
|
# Copyright (c) 2010 Google Inc. All rights reserved.
# Copyright (C) 2017 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.jsctestresults import JSCTestResults
class UnableToApplyPatch(Exception):
    """Raised when a patch cannot be applied to the working tree."""

    def __init__(self, patch):
        Exception.__init__(self)
        # Keep the offending patch so callers can report or retry it.
        self.patch = patch
class PatchIsNotValid(Exception):
    """Raised when a patch fails validation (e.g. obsoleted or rejected)."""

    def __init__(self, patch, failure_message):
        Exception.__init__(self)
        # Offending patch plus the human-readable reason it was rejected.
        self.patch = patch
        self.failure_message = failure_message
class PatchIsNotApplicable(Exception):
    """Raised when a patch is not relevant to the current processing queue."""

    def __init__(self, patch):
        Exception.__init__(self)
        self.patch = patch
class PatchAnalysisTaskDelegate(object):
    """Abstract interface through which PatchAnalysisTask runs commands and
    reports outcomes; concrete queues/bots must implement every hook."""

    def parent_command(self):
        raise NotImplementedError("subclasses must implement")

    def run_command(self, command):
        raise NotImplementedError("subclasses must implement")

    def command_passed(self, message, patch):
        raise NotImplementedError("subclasses must implement")

    def command_failed(self, message, script_error, patch):
        raise NotImplementedError("subclasses must implement")

    def refetch_patch(self, patch):
        raise NotImplementedError("subclasses must implement")

    def expected_failures(self):
        raise NotImplementedError("subclasses must implement")

    def test_results(self):
        raise NotImplementedError("subclasses must implement")

    def archive_last_test_results(self, patch):
        raise NotImplementedError("subclasses must implement")

    def build_style(self):
        raise NotImplementedError("subclasses must implement")

    # We could make results_archive optional, but for now it's required.
    def report_flaky_tests(self, patch, flaky_tests, results_archive):
        raise NotImplementedError("subclasses must implement")
class PatchAnalysisTask(object):
    """Drives the clean/update/apply/build/test pipeline for one patch.

    All external work goes through the delegate. Test failures are retried
    and compared against a clean-tree run to separate failures the patch
    introduced from pre-existing or flaky ones. (Python 2 code: note the
    `except ScriptError, e` syntax below.)
    """

    def __init__(self, delegate, patch):
        self._delegate = delegate
        self._patch = patch
        self._script_error = None
        self._results_archive_from_patch_test_run = None
        self._results_from_patch_test_run = None
        self.error = None

    def _run_command(self, command, success_message, failure_message):
        # Run one subcommand via the delegate, reporting pass/fail; returns
        # True on success, False (after recording the error) on ScriptError.
        if not self.validate():
            raise PatchIsNotValid(self._patch, self.error)
        try:
            self._delegate.run_command(command)
            self._delegate.command_passed(success_message, patch=self._patch)
            return True
        except ScriptError, e:
            self._script_error = e
            self.failure_status_id = self._delegate.command_failed(failure_message, script_error=self._script_error, patch=self._patch)
            return False

    def _clean(self):
        return self._run_command([
            "clean",
        ],
        "Cleaned working directory",
        "Unable to clean working directory")

    def _update(self):
        # FIXME: Ideally the status server log message should include which revision we updated to.
        return self._run_command([
            "update",
        ],
        "Updated working directory",
        "Unable to update working directory")

    def _apply(self):
        return self._run_command([
            "apply-attachment",
            "--no-update",
            "--non-interactive",
            self._patch.id(),
        ],
        "Applied patch",
        "Patch does not apply")

    def _check_patch_relevance(self):
        args = [
            "check-patch-relevance",
        ]
        # Delegates may scope commands to a test group (e.g. "jsc").
        if hasattr(self._delegate, 'group'):
            args.append("--group=%s" % self._delegate.group())
        return self._run_command(args, "Checked relevance of patch", "Patch was not relevant")

    def _build(self):
        args = [
            "build",
            "--no-clean",
            "--no-update",
            "--build-style=%s" % self._delegate.build_style(),
        ]
        if hasattr(self._delegate, 'group'):
            args.append("--group=%s" % self._delegate.group())
        return self._run_command(args, "Built patch", "Patch does not build")

    def _build_without_patch(self):
        args = [
            "build",
            "--force-clean",
            "--no-update",
            "--build-style=%s" % self._delegate.build_style(),
        ]
        if hasattr(self._delegate, 'group'):
            args.append("--group=%s" % self._delegate.group())
        return self._run_command(args, "Able to build without patch", "Unable to build without patch")

    def _test(self):
        args = [
            "build-and-test",
            "--no-clean",
            "--no-update",
            # Notice that we don't pass --build, which means we won't build!
            "--test",
            "--non-interactive",
            "--build-style=%s" % self._delegate.build_style(),
        ]
        if hasattr(self._delegate, 'group'):
            args.append("--group=%s" % self._delegate.group())
        return self._run_command(args, "Passed tests", "Patch does not pass tests")

    def _build_and_test_without_patch(self):
        args = [
            "build-and-test",
            "--force-clean",
            "--no-update",
            "--build",
            "--test",
            "--non-interactive",
            "--build-style=%s" % self._delegate.build_style(),
        ]
        if hasattr(self._delegate, 'group'):
            args.append("--group=%s" % self._delegate.group())
        return self._run_command(args, "Able to pass tests without patch", "Unable to pass tests without patch (tree is red?)")

    def _land(self):
        # Unclear if this should pass --quiet or not. If --parent-command always does the reporting, then it should.
        return self._run_command([
            "land-attachment",
            "--force-clean",
            "--non-interactive",
            "--parent-command=" + self._delegate.parent_command(),
            self._patch.id(),
        ],
        "Landed patch",
        "Unable to land patch")

    def _report_flaky_tests(self, flaky_test_results, results_archive):
        self._delegate.report_flaky_tests(self._patch, flaky_test_results, results_archive)

    def _results_failed_different_tests(self, first, second):
        # Either results object may be None (e.g. results parsing failed).
        first_failing_tests = [] if not first else first.failing_tests()
        second_failing_tests = [] if not second else second.failing_tests()
        return first_failing_tests != second_failing_tests

    def _should_defer_patch_or_throw(self, failures_with_patch, results_archive_for_failures_with_patch, script_error, failure_id):
        # Re-test on a clean tree to decide whether the with-patch failures
        # were actually introduced by the patch. Returns True to defer.
        self._build_and_test_without_patch()
        clean_tree_results = self._delegate.test_results()
        if clean_tree_results.did_exceed_test_failure_limit():
            # We cannot know whether the failures we saw in the test runs with the patch are expected.
            return True
        failures_introduced_by_patch = frozenset(failures_with_patch) - frozenset(clean_tree_results.failing_test_results())
        if failures_introduced_by_patch:
            self.failure_status_id = failure_id
            # report_failure will either throw or return false.
            return not self.report_failure(results_archive_for_failures_with_patch, LayoutTestResults(failures_introduced_by_patch, did_exceed_test_failure_limit=False), script_error)
        # In this case, we know that all of the failures that we saw with the patch were
        # also present without the patch, so we don't need to defer.
        return False

    # FIXME: Abstract out common parts of the retry logic.
    def _retry_jsc_tests(self):
        first_results = self._delegate.test_results()
        first_script_error = self._script_error
        first_failure_status_id = self.failure_status_id
        if first_results is None:
            return False
        if self._test():
            return True
        second_results = self._delegate.test_results()
        second_script_error = self._script_error
        if second_results is None:
            return False
        # Only failures seen in both runs are treated as real (non-flaky).
        consistently_failing_test_results = JSCTestResults.intersection(first_results, second_results)
        self._build_and_test_without_patch()
        clean_tree_results = self._delegate.test_results()
        if clean_tree_results is None:
            return False
        if consistently_failing_test_results.is_subset(clean_tree_results):
            return True
        self.failure_status_id = first_failure_status_id
        return self.report_failure(None, consistently_failing_test_results, first_script_error)

    def _retry_layout_tests(self):
        # Note: archive_last_test_results deletes the results directory, making these calls order-sensitve.
        # We could remove this dependency by building the test_results from the archive.
        first_results = self._delegate.test_results()
        first_results_archive = self._delegate.archive_last_test_results(self._patch)
        first_script_error = self._script_error
        first_failure_status_id = self.failure_status_id
        if self._test() and not first_results.did_exceed_test_failure_limit():
            # Only report flaky tests if we were successful at parsing results.json and archiving results.
            if first_results and first_results_archive:
                self._report_flaky_tests(first_results.failing_test_results(), first_results_archive)
            return True
        second_results = self._delegate.test_results()
        second_results_archive = self._delegate.archive_last_test_results(self._patch)
        second_script_error = self._script_error
        second_failure_status_id = self.failure_status_id
        if second_results.did_exceed_test_failure_limit() and first_results.did_exceed_test_failure_limit():
            self._build_and_test_without_patch()
            clean_tree_results = self._delegate.test_results()
            # Heuristic threshold: small deltas versus the clean tree defer.
            if (len(first_results.failing_tests()) - len(clean_tree_results.failing_tests())) <= 5:
                return False
            self.failure_status_id = first_failure_status_id
            return self.report_failure(first_results_archive, first_results, first_script_error)
        if second_results.did_exceed_test_failure_limit():
            self._should_defer_patch_or_throw(first_results.failing_test_results(), first_results_archive,
                                              first_script_error, first_failure_status_id)
            return False
        if first_results.did_exceed_test_failure_limit():
            self._should_defer_patch_or_throw(second_results.failing_test_results(), second_results_archive,
                                              second_script_error, second_failure_status_id)
            return False
        if self._results_failed_different_tests(first_results, second_results):
            first_failing_results_set = frozenset(first_results.failing_test_results())
            second_failing_results_set = frozenset(second_results.failing_test_results())
            # Tests that failed in exactly one of the two runs are flaky.
            tests_that_only_failed_first = first_failing_results_set.difference(second_failing_results_set)
            self._report_flaky_tests(tests_that_only_failed_first, first_results_archive)
            tests_that_only_failed_second = second_failing_results_set.difference(first_failing_results_set)
            self._report_flaky_tests(tests_that_only_failed_second, second_results_archive)
            tests_that_consistently_failed = first_failing_results_set.intersection(second_failing_results_set)
            if tests_that_consistently_failed:
                if self._should_defer_patch_or_throw(tests_that_consistently_failed, first_results_archive,
                                                     first_script_error, first_failure_status_id):
                    return False  # Defer patch
            # At this point we know that at least one test flaked, but no consistent failures
            # were introduced. This is a bit of a grey-zone.
            return False  # Defer patch
        if self._should_defer_patch_or_throw(first_results.failing_test_results(), first_results_archive,
                                             first_script_error, first_failure_status_id):
            return False  # Defer patch
        # At this point, we know that the first and second runs had the exact same failures,
        # and that those failures are all present on the clean tree, so we can say with certainty
        # that the patch is good.
        return True

    def _test_patch(self):
        if self._test():
            return True
        if hasattr(self._delegate, 'group') and self._delegate.group() == "jsc":
            return self._retry_jsc_tests()
        else:
            return self._retry_layout_tests()

    def results_archive_from_patch_test_run(self, patch):
        assert(self._patch.id() == patch.id())  # PatchAnalysisTask is not currently re-useable.
        return self._results_archive_from_patch_test_run

    def results_from_patch_test_run(self, patch):
        assert(self._patch.id() == patch.id())  # PatchAnalysisTask is not currently re-useable.
        return self._results_from_patch_test_run

    def report_failure(self, results_archive=None, results=None, script_error=None):
        # Stash the failing run's artifacts, then re-raise the causing error.
        if not self.validate():
            return False
        self._results_archive_from_patch_test_run = results_archive
        self._results_from_patch_test_run = results
        raise script_error or self._script_error

    def validate(self):
        # Subclass hook: return False (and set self.error) to abort processing.
        raise NotImplementedError("subclasses must implement")

    def run(self):
        raise NotImplementedError("subclasses must implement")
|
Debian/openjfx
|
modules/web/src/main/native/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
|
Python
|
gpl-2.0
| 15,182
|
"""
Virt management features
Copyright 2007, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
This software may be freely redistributed under the terms of the GNU
general public license.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
# warning: virt management is rather complicated
# to see a simple example of func, look at the
# service control module. API docs on how
# to use this to come.
# other modules
import os
import sub_process
import libvirt
# our modules
import codes
import func_module
# Numeric libvirt domain state codes -> coarse names reported by get_status().
# Codes 0-2 all collapse to "running" and 4-5 both to "shutdown".
VIRT_STATE_NAME_MAP = {
   0 : "running",
   1 : "running",
   2 : "running",
   3 : "paused",
   4 : "shutdown",
   5 : "shutdown",
   6 : "crashed"
}
class FuncLibvirtConnection(object):
    """Thin wrapper over a libvirt connection, auto-detecting Xen vs. QEMU/KVM."""

    version = "0.0.1"
    api_version = "0.0.1"
    description = "Virtualization items through func."

    def __init__(self):
        # A "xen" kernel release string means we are on a Xen dom0; otherwise
        # assume the local QEMU/KVM system hypervisor.
        cmd = sub_process.Popen("uname -r", shell=True, stdout=sub_process.PIPE)
        output = cmd.communicate()[0]

        if output.find("xen") != -1:
            conn = libvirt.open(None)
        else:
            conn = libvirt.open("qemu:///system")

        if not conn:
            raise codes.FuncException("hypervisor connection failure")

        self.conn = conn

    def find_vm(self, vmid):
        """
        Return the domain whose name equals vmid.
        Extra bonus feature: vmid = -1 returns a list of everything
        """
        conn = self.conn
        vms = []

        # this block of code borrowed from virt-manager:
        # get working domain's name
        ids = conn.listDomainsID()
        for id in ids:
            vm = conn.lookupByID(id)
            vms.append(vm)
        # get defined domain
        names = conn.listDefinedDomains()
        for name in names:
            vm = conn.lookupByName(name)
            vms.append(vm)

        if vmid == -1:
            return vms

        for vm in vms:
            if vm.name() == vmid:
                return vm

        raise codes.FuncException("virtual machine %s not found" % vmid)

    def shutdown(self, vmid):
        return self.find_vm(vmid).shutdown()

    def pause(self, vmid):
        # BUG FIX: the original called self.suspend(self.conn, vmid), passing
        # an extra positional argument (suspend() takes only vmid), which
        # would raise TypeError whenever pause() was invoked.
        return self.suspend(vmid)

    def unpause(self, vmid):
        # BUG FIX: same extra-argument defect as pause(); see above.
        return self.resume(vmid)

    def suspend(self, vmid):
        return self.find_vm(vmid).suspend()

    def resume(self, vmid):
        return self.find_vm(vmid).resume()

    def create(self, vmid):
        return self.find_vm(vmid).create()

    def destroy(self, vmid):
        return self.find_vm(vmid).destroy()

    def undefine(self, vmid):
        return self.find_vm(vmid).undefine()

    def get_status2(self, vm):
        """Map an already-looked-up domain's state code to a coarse name."""
        state = vm.info()[0]
        return VIRT_STATE_NAME_MAP.get(state, "unknown")

    def get_status(self, vmid):
        """Map the named domain's state code to a coarse name."""
        state = self.find_vm(vmid).info()[0]
        return VIRT_STATE_NAME_MAP.get(state, "unknown")

    def nodeinfo(self):
        return self.conn.getInfo()

    def get_type(self):
        return self.conn.getType()
class Virt(func_module.FuncModule):
    """func minion module exposing libvirt domain management plus koan installs."""

    def __get_conn(self):
        # Open a fresh libvirt wrapper and cache it on self.conn for helpers.
        self.conn = FuncLibvirtConnection()
        return self.conn

    def state(self):
        """Return a list of "name status" strings for every known domain."""
        vms = self.list_vms()
        state = []
        for vm in vms:
            # list_vms() has just populated self.conn.
            state_blurb = self.conn.get_status(vm)
            state.append("%s %s" % (vm,state_blurb))
        return state

    def info(self):
        """Return {vm_name: {state/maxMem/memory/nrVirtCpu/cpuTime}}."""
        vms = self.list_vms()
        info = dict()
        for vm in vms:
            data = self.conn.find_vm(vm).info()
            # libvirt returns maxMem, memory, and cpuTime as long()'s, which
            # xmlrpclib tries to convert to regular int's during serialization.
            # This throws exceptions, so convert them to strings here and
            # assume the other end of the xmlrpc connection can figure things
            # out or doesn't care.
            info[vm] = {
                "state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"),
                "maxMem" : str(data[1]),
                "memory" : str(data[2]),
                "nrVirtCpu" : data[3],
                "cpuTime" : str(data[4])
            }
        return info

    def nodeinfo(self):
        """Return hypervisor host hardware info keyed by field name."""
        self.__get_conn()
        info = dict()
        data = self.conn.nodeinfo()
        info = {
            "cpumodel" : str(data[0]),
            "phymemory" : str(data[1]),
            "cpus" : str(data[2]),
            "cpumhz" : str(data[3]),
            "numanodes" : str(data[4]),
            "sockets" : str(data[5]),
            "cpucores" : str(data[6]),
            "cputhreads" : str(data[7])
        }
        return info

    def list_vms(self):
        """Return the names of all running and defined domains."""
        self.conn = self.__get_conn()
        vms = self.conn.find_vm(-1)
        results = []
        for x in vms:
            try:
                results.append(x.name())
            except:
                # Domain may vanish between enumeration and name(); skip it.
                pass
        return results

    def virttype(self):
        """Return the hypervisor driver name reported by libvirt."""
        return self.__get_conn().get_type()

    def autostart(self, vm):
        """Mark a domain to start automatically when the host boots."""
        self.conn = self.__get_conn()
        if self.conn.get_type() == "Xen":
            autostart_args = [
                "/bin/ln",
                "-s",
                "/etc/xen/%s" % vm,
                "/etc/xen/auto"
            ]
        else:
            # When using KVM, we need to make sure the autostart
            # directory exists
            mkdir_args = [
                "/bin/mkdir",
                "-p",
                "/etc/libvirt/qemu/autostart"
            ]
            sub_process.call(mkdir_args,shell=False,close_fds=True)
            # We aren't using virsh autostart because we want
            # the command to work even when the VM isn't running
            autostart_args = [
                "/bin/ln",
                "-s",
                "/etc/libvirt/qemu/%s.xml" % vm,
                "/etc/libvirt/qemu/autostart/%s.xml" % vm
            ]
        return sub_process.call(autostart_args,shell=False,close_fds=True)

    def freemem(self):
        """Estimate memory (MB) still available for new guests."""
        self.conn = self.__get_conn()
        # Start with the physical memory and subtract
        memory = self.conn.nodeinfo()[1]
        # Take 256M off which is reserved for Domain-0
        memory = memory - 256
        vms = self.conn.find_vm(-1)
        for vm in vms:
            # Exclude stopped vms and Domain-0 by using
            # ids greater than 0
            if vm.ID() > 0:
                # This node is active - subtract its memory. The /1024 implies
                # info()[2] is KiB (giving MB) -- TODO confirm with libvirt docs.
                memory = memory - int(vm.info()[2])/1024
        return memory

    def install(self, server_name, target_name, system=False, virt_name=None, virt_path=None, graphics=False):
        """
        Install a new virt system by way of a named cobbler profile.
        """
        # Example:
        # install("bootserver.example.org", "fc7webserver", True)
        # install("bootserver.example.org", "client.example.org", True, "client-disk0", "HostVolGroup00")
        conn = self.__get_conn()

        if conn is None:
            raise codes.FuncException("no connection")

        if not os.path.exists("/usr/bin/koan"):
            raise codes.FuncException("no /usr/bin/koan")
        target = "profile"
        if system:
            target = "system"

        koan_args = [
            "/usr/bin/koan",
            "--virt",
            "--%s=%s" % (target, target_name),
            "--server=%s" % server_name
        ]
        if virt_name:
            koan_args.append("--virt-name=%s" % virt_name)
        if virt_path:
            koan_args.append("--virt-path=%s" % virt_path)
        if not graphics:
            koan_args.append("--nogfx")
        rc = sub_process.call(koan_args,shell=False,close_fds=True)
        if rc == 0:
            return 0
        else:
            raise codes.FuncException("koan returned %d" % rc)

    def shutdown(self, vmid):
        """
        Make the machine with the given vmid stop running.
        Whatever that takes.
        """
        self.__get_conn()
        self.conn.shutdown(vmid)
        return 0

    def pause(self, vmid):
        """
        Pause the machine with the given vmid.
        """
        self.__get_conn()
        self.conn.suspend(vmid)
        return 0

    def unpause(self, vmid):
        """
        Unpause the machine with the given vmid.
        """
        self.__get_conn()
        self.conn.resume(vmid)
        return 0

    def create(self, vmid):
        """
        Start the machine via the given mac address.
        """
        self.__get_conn()
        self.conn.create(vmid)
        return 0

    def destroy(self, vmid):
        """
        Pull the virtual power from the virtual domain, giving it virtually no
        time to virtually shut down.
        """
        self.__get_conn()
        self.conn.destroy(vmid)
        return 0

    def undefine(self, vmid):
        """
        Stop a domain, and then wipe it from the face of the earth.
        by deleting the disk image and it's configuration file.
        """
        self.__get_conn()
        self.conn.undefine(vmid)
        return 0

    def get_status(self, vmid):
        """
        Return a state suitable for server consumption. Aka, codes.py values, not XM output.
        """
        self.__get_conn()
        return self.conn.get_status(vmid)
|
pombredanne/func
|
func/minion/modules/virt.py
|
Python
|
gpl-2.0
| 9,204
|
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This module contains tests for the bodhi.server.scripts.untag_branched module.
"""
from cStringIO import StringIO
from mock import patch
from bodhi.server.scripts import untag_branched
from bodhi.tests.server.base import BaseTestCase
class TestUsage(BaseTestCase):
    """
    This class contains tests for the usage() function.
    """
    # @patch decorators are applied bottom-up, so the mocked sys.stdout is
    # injected first (stdout) and the mocked sys.exit second (exit).
    @patch('sys.exit')
    @patch('sys.stdout', new_callable=StringIO)
    def test_usage(self, stdout, exit):
        """
        Make sure the usage info is printed and then it exits.
        """
        argv = ['untag_branched']

        untag_branched.usage(argv)

        self.assertEqual(
            stdout.getvalue(),
            'usage: untag_branched <config_uri>\n(example: "untag_branched development.ini")\n')
        exit.assert_called_once_with(1)
|
tyll/bodhi
|
bodhi/tests/server/scripts/test_untag_branched.py
|
Python
|
gpl-2.0
| 1,514
|
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from google.api_core.gapic_v1 import client_info
from google.api_core.gapic_v1 import config
from google.api_core.gapic_v1 import method
from google.api_core.gapic_v1 import routing_header
# Public API of the gapic_v1 package.
__all__ = ["client_info", "config", "method", "routing_header"]

# The async variants require async/await support, available from Python 3.6.
if sys.version_info >= (3, 6):
    from google.api_core.gapic_v1 import config_async  # noqa: F401
    from google.api_core.gapic_v1 import method_async  # noqa: F401

    __all__.append("config_async")
    __all__.append("method_async")
|
Servir-Mekong/SurfaceWaterTool
|
lib/google/api_core/gapic_v1/__init__.py
|
Python
|
gpl-3.0
| 1,079
|
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
#########################################################################
## This is a sample controller
## - index is the default action of any application
## - user is required for authentication and authorization
## - download is for downloading files uploaded in the db (does streaming)
## - call exposes all registered services (none by default)
#########################################################################
@auth.requires_login()
# @auth.requires_membership('Super-Administrator')
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360'))
def evaluation():
    """Smartgrid over db.evaluation; new rows default to the current period."""
    query=db.evaluation
    import cpfecys
    cperiod = cpfecys.current_year_period()
    # Pin the period to the current one and hide the field from the form.
    db.evaluation.period.default = cperiod.id
    db.evaluation.period.writable = False
    db.evaluation.period.readable = False
    grid = SQLFORM.smartgrid(query, csv=False)
    return dict(grid=grid)
#emarquez
@auth.requires_login()
@auth.requires_membership('Super-Administrator')
def evaluation_period():
    """Smartgrid over evaluation periods (Super-Administrator only).

    Unlike evaluation(), the period field stays fully visible and editable.
    """
    # Removed an unused `import cpfecys` and dead commented-out code that
    # duplicated evaluation()'s period pinning.
    query=db.evaluation_period
    grid = SQLFORM.smartgrid(query, csv=False)
    return dict(grid=grid)
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360'))
def template():
    """Admin smartgrid over evaluation templates."""
    return dict(grid=SQLFORM.smartgrid(db.evaluation_template, csv=False))
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360'))
def evaluation_type():
    """Admin smartgrid over evaluation types."""
    query=db.evaluation_type
    grid = SQLFORM.smartgrid(query, csv=False)
    return dict(grid=grid)
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('evaluator360'))
def repository_evaluation():
    """Read-only smartgrid over the evaluation repository."""
    # NOTE(review): this decorator checks 'evaluator360' (lowercase) while
    # every sibling action checks 'Evaluator360'. Role names are compared as
    # strings, so confirm which spelling the role table actually uses.
    query=db.repository_evaluation
    grid = SQLFORM.smartgrid(query, csv=False,create=False,editable=False,deletable=False)
    return dict(grid=grid)
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360'))
def answer_type():
    """Admin smartgrid over answer types."""
    return dict(grid=SQLFORM.smartgrid(db.answer_type, csv=False))
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360'))
def answer():
    """Admin grid over answers (plain grid, not smartgrid)."""
    return dict(grid=SQLFORM.grid(db.answer, csv=False))
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360'))
def evaluation_question():
    """Admin grid over evaluation questions."""
    query=db.evaluation_question
    grid = SQLFORM.grid(query, csv=False)
    return dict(grid=grid)
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360'))
def question_type():
    """Admin grid over question types."""
    return dict(grid=SQLFORM.grid(db.question_type, csv=False))
@auth.requires_login()
@auth.requires(auth.has_membership('Student') or auth.has_membership('Teacher') or auth.has_membership('Academic'))
def evaluation_reply():
    """Render and process a 360-degree evaluation form for one evaluated user.

    Request vars: ``project``, ``period``, ``evaluation`` (evaluation id) and
    ``evaluated`` (auth_user id).  A plain GET renders the questionnaire;
    ``request.args(0) == 'send'`` stores the submitted answers.  Every failed
    authorization check flashes 'Not authorized' and redirects to the index.
    """
    import cpfecys
    cperiod = cpfecys.current_year_period()
    project = request.vars['project']
    period = request.vars['period']
    evaluation = request.vars['evaluation']
    evaluated = request.vars['evaluated']
    # The requester must either hold a project assignment covering `period`,
    # or (for academics) be enrolled in the course for that semester.
    if db((db.auth_user.id==auth.user.id) & \
        (db.user_project.project==project) & \
        ((db.user_project.period <= period) & ((db.user_project.period + db.user_project.periods) > period))).select().first() is None:
        academic_var = db.academic(db.academic.id_auth_user==auth.user.id)
        if db((db.academic_course_assignation.carnet==academic_var.id)&(db.academic_course_assignation.semester==period)&(db.academic_course_assignation.assignation==project)).select().first() is None:
            session.flash =T('Not authorized')
            redirect(URL('default','index'))
    # Resolve the evaluated user; they too must have an assignment covering
    # `period`.  Note: `evaluated` is rebound from an id to a joined row.
    evaluated = db((db.auth_user.id==evaluated) & \
        ((db.user_project.period <= period) & ((db.user_project.period + db.user_project.periods) > period))).select().first()
    if evaluated is None:
        session.flash =T('Not authorized')
        redirect(URL('default','index'))
    var_evaluation = db((db.evaluation.id == evaluation)).select().first()
    # Evaluations can only be answered during the system's current period.
    if (str(period) != str(cperiod.id)):
        session.flash =T('Not authorized')
        redirect(URL('default','index'))
    # Determine the evaluator's role code: 2 = Student, 3 = Teacher,
    # 5 = Academic (role ids are hard-coded throughout this controller).
    user_role = None
    if auth.has_membership('Student') or auth.has_membership('Teacher'):
        try:
            if db((db.user_project.assigned_user == auth.user.id) & \
                ((db.user_project.period <= period) & ((db.user_project.period + db.user_project.periods) > period))).select().first() is not None:
                if auth.has_membership('Student'):
                    user_role = 2
                else:
                    user_role = 3
        except:
            None
    if auth.has_membership('Academic'):
        try:
            academic_var = db.academic(db.academic.id_auth_user==auth.user.id)
            if db((db.academic_course_assignation.carnet==academic_var.id)&(db.academic_course_assignation.semester==period)).select().first() is not None:
                user_role = 5
        except:
            None
    # The repository entry declares which role is allowed to answer it.
    if var_evaluation.repository_evaluation.user_type_evaluator != user_role:
        session.flash =T('Not authorized')
        redirect(URL('default','index'))
    var_repository_evaluation = db(db.repository_evaluation.id == var_evaluation.repository_evaluation).select().first()
    # Distinct question categories, used to group questions in the view.
    question_category = db(db.question_repository.repository_evaluation == var_evaluation.repository_evaluation).select(db.question_repository.question_type_name,distinct=True)
    evaluation_result = db((db.evaluation_result.repository_evaluation == var_evaluation.repository_evaluation) &\
                            (db.evaluation_result.evaluated == evaluated.auth_user.id) & \
                            (db.evaluation_result.period == period) & \
                            (db.evaluation_result.project == project) ).select().first()
    # Each evaluator may answer a given evaluation only once per target.
    if (evaluation_result is not None) and \
        (db((db.evaluation_auth_user.evaluation_result == evaluation_result.id) &\
        (db.evaluation_auth_user.evaluator == auth.user.id) ).select().first() is not None):
        session.flash =T('Not authorized')
        redirect(URL('default','index'))
    if (request.args(0) == 'send'):
        # Create the aggregate result row on first submission for this target.
        if evaluation_result is None:
            evaluation_result_id = db.evaluation_result.insert(repository_evaluation = var_evaluation.repository_evaluation,
                                                        evaluated = evaluated.auth_user.id,
                                                        period = period,
                                                        project = project)
        else:
            evaluation_result_id = evaluation_result.id
        # Record who answered, so repeat submissions are rejected above.
        evaluation_auth_user = db((db.evaluation_auth_user.evaluation_result == evaluation_result_id) &\
        (db.evaluation_auth_user.evaluator == auth.user.id) ).select().first()
        if evaluation_auth_user is None:
            db.evaluation_auth_user.insert(evaluation_result = evaluation_result_id,
                                        evaluator = auth.user.id)
        question_query = db((db.question_repository.repository_evaluation == var_evaluation.repository_evaluation)).select()
        for question in question_query:
            answer_query = db((db.repository_answer.question_repository == question.id) ).select()
            if len(answer_query) == 0:
                # Open-ended question: store the free text, if any was entered.
                # Form field names are 'group_<question id>'.
                if (request.vars['group_'+str(question.id)] is not None) & (request.vars['group_'+str(question.id)] != ""):
                    db.evaluation_solve_text.insert(evaluation_result = evaluation_result_id,
                                                question_repository = question.id,
                                                answer = request.vars['group_'+str(question.id)])
            else:
                # Closed question: bump the count of the selected answer.
                for answer in answer_query:
                    if answer.exclusive_one_answer == True:
                        if request.vars['group_'+str(question.id)] is not None:
                            if str(request.vars['group_'+str(question.id)]) == str(answer.id):
                                # Counts are aggregated per (result, question, answer).
                                evaluation_solve_detail = db((db.evaluation_solve_detail.evaluation_result == evaluation_result_id) &\
                                                        (db.evaluation_solve_detail.question_repository == question.id) & \
                                                        (db.evaluation_solve_detail.repository_answer == answer.id)).select().first()
                                if evaluation_solve_detail is None:
                                    db.evaluation_solve_detail.insert(evaluation_result = evaluation_result_id,
                                                                question_repository = question.id,
                                                                repository_answer = answer.id,
                                                                total_count = 1)
                                else:
                                    db(db.evaluation_solve_detail.id == evaluation_solve_detail.id).update(total_count = (evaluation_solve_detail.total_count + 1) )
        session.flash = T('The evaluation has been sent')
        redirect(URL('evaluation','evaluation_list', vars=dict(period=period,project=project) ))
    return dict(var_evaluation = var_evaluation,
                var_repository_evaluation = var_repository_evaluation,
                question_category = question_category,
                evaluated = evaluated)
@auth.requires_login()
@auth.requires(auth.has_membership('Student') or auth.has_membership('Teacher') or auth.has_membership('Academic'))
def evaluation_list():
    """List the evaluations available for a project during a period.

    Request vars: ``project`` and ``period``.  Determines the caller's role
    code (2 = Student, 3 = Teacher, 5 = Academic) and whether the "results"
    link should be shown (``flag``: the caller was evaluated AND is assigned
    to this project).
    """
    project = request.vars['project']
    period = request.vars['period']
    # User information shown in the view header.
    first_name = auth.user.first_name
    last_name = auth.user.last_name
    # All users assigned to the project whose assignment window covers `period`.
    users_project = db((db.user_project.project==project) & \
        ((db.user_project.period <= period) & ((db.user_project.period + db.user_project.periods) > period))).select()
    user_role = None
    user = None
    if auth.has_membership('Student') or auth.has_membership('Teacher'):
        try:
            user = db((db.user_project.assigned_user == auth.user.id) & \
                ((db.user_project.period <= period) & ((db.user_project.period + db.user_project.periods) > period)) & (db.user_project.project==project) ).select().first()
            if user is not None:
                # Role codes are hard-coded throughout this controller.
                if auth.has_membership('Student'):
                    user_role = 2
                else:
                    user_role = 3
        except:
            # NOTE(review): bare except kept for backward compatibility; it
            # silently ignores lookup failures.
            pass
    if auth.has_membership('Academic'):
        try:
            if user is None:
                academic_var = db.academic(db.academic.id_auth_user==auth.user.id)
                if db((db.academic_course_assignation.carnet==academic_var.id)&(db.academic_course_assignation.semester==period)&(db.academic_course_assignation.assignation==project)).select().first() is not None:
                    user_role = 5
                else:
                    session.flash = T('Not authorized')
                    redirect(URL('default','index'))
        except:
            # NOTE(review): web2py's redirect() raises an HTTP exception that
            # this bare except swallows, so the "Not authorized" redirect
            # above never fires.  Kept as-is pending confirmation; consider
            # re-raising HTTP here.
            pass
    # RGUARAN: results are shown if the user was evaluated and is assigned
    # to this project.
    evaluations = db(db.evaluation.period == period).select()
    isEvaluated = db(db.evaluation_result.evaluated == auth.user.id).select().first()
    isTutor = db((db.user_project.assigned_user == auth.user.id)&
                 (db.user_project.project == project)).select().first()
    flag = isEvaluated is not None and isTutor is not None
    return dict(evaluations=evaluations,
                users_project=users_project,
                project=project,
                user_role=user_role,
                period=period,
                first_name=first_name,
                last_name=last_name,
                flag=flag)
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360'))
def question_template():
    """Ajax handler that adds a question to an evaluation template.

    Request vars: ``question_type``, ``answer_type``, ``template_id``,
    ``question``, ``answer_obligate`` and ``operation``.  When operation is
    'add_question', the question row is created on demand and linked to the
    template unless the same (template, question, question type) combination
    already exists.  Returns the state plus a feedback message for the view.
    """
    question_type = request.vars['question_type']
    answer_type = request.vars['answer_type']
    template_id = request.vars['template_id']
    question_var = request.vars['question']
    answer_obligate = request.vars['answer_obligate']
    # The flag arrives as the string "true"/"false"; normalize to a bool.
    if str(answer_obligate).lower() == "true":
        answer_obligate_var = True
    else:
        answer_obligate_var = False
    message_var = None
    if request.vars['operation'] == 'add_question':
        if (question_var is None) or (question_var == ""):
            message_var = T('The question can not be empty.')
        else:
            var_answer_type = db((db.answer_type.id == answer_type)).select().first()
            var_answer = db((db.answer.answer_type == answer_type)).select()
            add_question_bol = True
            # The disabled block below enforced that exclusive answers of a
            # type sum to a grade of 100; kept for reference.
            #if var_answer is not None:
                #if var_answer_type.exclusive_one_answer == True:
                    #total_grade = 0
                    #for answer in var_answer:
                    #    total_grade = total_grade + answer.grade
                    #pass
                    #if total_grade != 100:
                    #    message_var = T("No question was not added because the responses in this type of response does not sum up to 100. The sum of the responses are: ")+str(total_grade)
                    #    add_question_bol = False
            if add_question_bol == True:
                # Reuse an existing question with identical text, or create it.
                var_question = db((db.evaluation_question.question == question_var)).select().first()
                if var_question is None:
                    db.evaluation_question.insert(question = question_var)
                    var_question = db((db.evaluation_question.question == question_var)).select().first()
                var_question_temp = db((db.evaluation_template_detail.evaluation_template == template_id)&(db.evaluation_template_detail.evaluation_question == var_question.id)&(db.evaluation_template_detail.question_type == question_type)).select().first()
                if var_question_temp is None:
                    db.evaluation_template_detail.insert(evaluation_template = template_id,
                                                            evaluation_question = var_question.id,
                                                            question_type = question_type,
                                                            obligatory = answer_obligate_var,
                                                            answer_type = answer_type)
                    message_var = T('Question has been added.')
                else:
                    message_var = "Error!!! " + T('The question has already been added.')
    return dict(question_type = question_type,
                template_id = template_id,
                answer_type = answer_type,
                message_var = message_var)
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360'))
def evaluation_template_detail():
    """List the distinct question types used by one evaluation template.

    Reads ``template_id`` from the request; when it is missing (the string
    'None') an empty list is returned instead of querying.
    """
    template_id = request.vars['template_id']
    if str(template_id) != str('None'):
        rows = db(db.evaluation_template_detail.evaluation_template == template_id).select(
            db.evaluation_template_detail.question_type, distinct=True)
        list_evaluation_template_detail = [row.question_type
                                           for row in rows if row is not None]
    else:
        list_evaluation_template_detail = []
    return dict(list_evaluation_template_detail=list_evaluation_template_detail,
                template_id=template_id)
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360'))
def evaluation_template():
    """Template editor page: create templates and manage their questions.

    Dispatches several embedded sub-forms by web2py formname:
    'remove_question' deletes a template detail row; 'add_to_history'
    snapshots the template (questions + answers) into the repository_*
    tables; 'add_question' links an existing question to the template.
    ``op_select`` selects which creation form (template / answer / answer
    type / question type / evaluation type) the view shows after a round
    trip.
    """
    template_id = request.vars['template_id']
    op_select = request.vars['op_select']
    ev_type = request.vars['ev_type']
    # Inline change of a template's evaluation type from the view.
    if (ev_type is not None) and (template_id is not None):
        db(db.evaluation_template.id == template_id).update(evaluation_type = ev_type)
    select_form = FORM(INPUT(_name='ev_te_de_id',_type='text'))
    if select_form.accepts(request.vars,formname='remove_question'):
        db(db.evaluation_template_detail.id==select_form.vars.ev_te_de_id).delete()
        response.flash = T('Question removed')
    add_to_history = FORM(INPUT(_name='hitory_name',_type='text'))
    if add_to_history.accepts(request.vars,formname='add_to_history'):
        if add_to_history.vars.hitory_name == "":
            response.flash = "Error! " + T('You must enter a name')
        else:
            try:
                # Copy the template, its questions and their answers into the
                # repository_* tables as an immutable point-in-time snapshot.
                var_template = db(db.evaluation_template.id == template_id).select().first()
                eval_h_id = db.repository_evaluation.insert(name = add_to_history.vars.hitory_name,
                            template_name = var_template.name,
                            evaluation_type_name = var_template.evaluation_type.name,
                            user_type_evaluated = var_template.evaluation_type.user_type_evaluated,
                            user_type_evaluator = var_template.evaluation_type.user_type_evaluator)
                var_template_detail = db(db.evaluation_template_detail.evaluation_template == template_id).select()
                for v_t_d in var_template_detail:
                    question_h_id = db.question_repository.insert(question = v_t_d.evaluation_question.question,
                                    question_type_name = v_t_d.question_type.name,
                                    obligatory = v_t_d.obligatory,
                                    repository_evaluation = eval_h_id)
                    var_answer_type = db(db.answer.answer_type == v_t_d.answer_type).select()
                    for v_a_t in var_answer_type:
                        db.repository_answer.insert(answer = v_a_t.answer,
                                        answer_type_name = v_a_t.answer_type.name,
                                        grade = v_a_t.grade,
                                        exclusive_one_answer = v_a_t.answer_type.exclusive_one_answer,
                                        question_repository = question_h_id)
                response.flash = T('Evaluation has been added to the repository evaluation')
            except:
                # NOTE(review): the unique-name violation is the expected
                # failure here, but this bare except hides any other error too.
                response.flash = "Error! " + T('An assessment already exists in the repository of evaluations with that name')
    add_question_form = FORM(INPUT(_name='question_id',_type='text'))
    if add_question_form.accepts(request.vars,formname='add_question'):
        question_id = request.vars['question_id']
        if question_id is not None:
            var_question_temp = db((db.evaluation_template_detail.evaluation_template == template_id)&(db.evaluation_template_detail.evaluation_question == question_id)).select().first()
            if var_question_temp is None:
                db.evaluation_template_detail.insert(evaluation_template = template_id,
                                        evaluation_question = question_id)
                response.flash = T('Question has been added.')
            else:
                response.flash = T('The question has already been added.')
    evaluation_template_list=db(db.evaluation_template).select()
    db.evaluation_template.date_created.writable = False
    # Creation forms: on validation errors, op_select re-opens the matching
    # tab in the view ('2'..'6').
    form=crud.create(db.evaluation_template, next=URL('evaluation','evaluation_template',vars=dict(op_select = 2) ),message=T("Template has been created"))
    if form.errors:
        op_select = '2'
        response.flash = T('Error')
    form_answer_create = crud.create(db.answer, next=URL('evaluation','evaluation_template',vars=dict(op_select = 3) ),message=T("Answer has been created"))
    if form_answer_create.errors:
        op_select = '3'
        response.flash = T('Error')
    form_answer_type_create = crud.create(db.answer_type, next=URL('evaluation','evaluation_template',vars=dict(op_select = 4) ),message=T("Type answer has been created"))
    if form_answer_type_create.errors:
        op_select = '4'
        response.flash = T('Error')
    form_question_type_create = crud.create(db.question_type, next=URL('evaluation','evaluation_template',vars=dict(op_select = 5) ),message=T("Type question has been created"))
    if form_question_type_create.errors:
        op_select = '5'
        response.flash = T('Error')
    form_evaluation_type_create = crud.create(db.evaluation_type, next=URL('evaluation','evaluation_template',vars=dict(op_select = 6) ),message=T("Type evaluation has been created"))
    if form_evaluation_type_create.errors:
        op_select = '6'
        response.flash = T('Error')
    return dict(evaluation_template_list = evaluation_template_list,
                form = form,
                form_answer_create = form_answer_create,
                form_answer_type_create = form_answer_type_create,
                form_question_type_create = form_question_type_create,
                form_evaluation_type_create = form_evaluation_type_create,
                op_select = op_select,
                template_id = template_id)
@auth.requires_login()
@auth.requires(auth.has_membership('Student') or auth.has_membership('Teacher') or auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360') or auth.has_membership('Ecys-Administrator'))
def results():
    """Show a user's per-question averaged evaluation results for a period.

    Students/teachers see their own results, with a period selector built
    from their project assignments.  Administrative roles reach this page
    from `search` with explicit ``user``, ``period`` and ``group`` request
    vars.  Each question's score is the count-weighted average of answer
    grades; ``resGraph`` holds the same values scaled to 0..1 for the chart.
    """
    #Import DB
    import cpfecys
    from decimal import Decimal
    first_name = ''
    last_name = ''
    user_id = None
    fullname = ''
    periods = []
    group = ''
    nameperiod = ''
    if auth.has_membership('Student') or auth.has_membership('Teacher'):
        if auth.has_membership('Ecys-Administrator'):
            # Admins who also hold Student/Teacher fall through to the
            # request-vars branch below.
            first_name = ''
            last_name = ''
            user_id = None
        else:
            #User information
            first_name = auth.user.first_name
            last_name = auth.user.last_name
            user_id = auth.user.id
        fullname = first_name + ' ' + last_name
        #Obtain the current period of the system and all the register periods
        period = cpfecys.current_year_period()
        periods_temp = db(db.period_year).select(orderby=~db.period_year.id)
        for period_temp in periods_temp:
            try:
                # Offer only periods in which the user held an assignment.
                if db((db.user_project.assigned_user==auth.user.id)&\
                    (db.user_project.period == db.period_year.id)&\
                    ((db.user_project.period <= period_temp.id) & \
                    ((db.user_project.period + db.user_project.periods) > period_temp.id))).select(db.user_project.ALL).first() is not None:
                    periods.append(period_temp)
            except:
                None
        #Check if the period has changed
        if request.vars['period'] is None:
            None
        else:
            if request.vars['period']!='':
                period = request.vars['period']
                period = db(db.period_year.id==period).select().first()
            else:
                session.flash = T('Not valid Action.')
                redirect(URL('default','index'))
            pass
        pass
    if first_name == '' and user_id is None:
        # Administrative entry point: the evaluated user, period and group
        # all come from the URL (set by the `search` action).
        # NOTE(review): if 'period' is absent here, `period` is never bound
        # and the query below raises — confirm all callers pass it.
        if request.vars['period'] is None:
            None
        else:
            if request.vars['period']!='':
                period = request.vars['period']
                user_id = request.vars['user']
                group = request.vars['group']
                qFullname = db(db.auth_user.id == user_id).select(db.auth_user.first_name, db.auth_user.last_name).first()
                fullname = qFullname.first_name + ' ' + qFullname.last_name
                nameperiod = db(db.period_year.id==period).select().first()
            else:
                session.flash = T('Not valid Action.')
                redirect(URL('default','index'))
            pass
    # Weighted average per question: sum(grade * count) / sum(count).
    columnTotal=((db.answer.grade * db.evaluation_solve_detail.total_count).sum()/(db.evaluation_solve_detail.total_count.sum())).with_alias('total')
    rows=db((db.evaluation_solve_detail.evaluation_result == db.evaluation_result.id)&
        (db.evaluation_solve_detail.repository_answer == db.repository_answer.id)&
        (db.evaluation_solve_detail.question_repository == db.question_repository.id)&
        (db.repository_answer.question_repository == db.question_repository.id)&
        (db.evaluation_result.repository_evaluation == db.repository_evaluation.id)&
        (db.repository_answer.answer == db.answer.answer)&
        (db.evaluation_result.evaluated == db.auth_user.id)&
        (db.auth_user.id == user_id)&
        (db.evaluation_result.period == period)).select(db.question_repository.question,columnTotal,groupby=db.question_repository.id)
    #print '********************LAST SQL'
    #print db._lastsql
    ##calcular el promedio de los resultados
    # Overall average across questions, plus 0..1-scaled values for the graph.
    suma = Decimal(0)
    promedio = 0
    constantCien = Decimal(100)
    graphVal = Decimal(0)
    resGraph = []
    if rows.first() is not None:
        for row in rows:
            #print row.question_repository.question, row.total
            suma = row.total + suma
            graphVal = row.total / constantCien
            resGraph.append("%.3f" % (graphVal))
            pass
        avg = suma / len(rows)
        promedio = int(avg)
        pass
    gridAnswer=db(db.answer).select(db.answer.grade, db.answer.answer)
    #gridAnswer = SQLFORM.grid(query, fields = [db.answer.grade, db.answer.answer], csv=False,deletable=False,
    #editable=False, details=False, selectable=None, create=False, searchable=False,sortable=False,
    #links_in_grid=False, user_signature=False, showbuttontext=False, ignore_rw = True)
    # Thresholds the view uses to color the overall average.
    min_average = 40
    max_average = 60
    return dict(periods=periods,period=period,periodo=period,
                fullname=fullname,rows=rows,promedio=promedio,
                gridAnswer=gridAnswer,resGraph=resGraph,
                min_average=min_average,max_average=max_average,
                group=group,nameperiod=nameperiod)
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360') or auth.has_membership('Ecys-Administrator') )
def results_search():
    """Aggregated per-question evaluation results for a whole role group.

    Like `results`, but averaged over every evaluated user of the selected
    auth group (2 = student, 3 = teacher, 6 = ecys-administrator) during the
    selected period.  Redirects when there is no data to show.
    """
    #Import DB
    import cpfecys
    from decimal import Decimal
    if auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360') or auth.has_membership('Ecys-Administrator'):
        #User information
        first_name = auth.user.first_name
        last_name = auth.user.last_name
        user_id = auth.user.id
        fullname = first_name + ' ' + last_name
        #Obtain the current period of the system and all the register periods
        period = cpfecys.current_year_period()
        periods = db(db.period_year).select()
        # Default group: first of the three selectable auth groups.
        group = db((db.auth_group.id == 2)|\
                    (db.auth_group.id == 3)|\
                    (db.auth_group.id == 6)).select(db.auth_group.id).first()
        groups = db((db.auth_group.id == 2)|\
                    (db.auth_group.id == 3)|\
                    (db.auth_group.id == 6)).select()
        #Check if the period has changed
        if request.vars['period'] is None:
            None
        else:
            if request.vars['period']!='':
                period = request.vars['period']
                period = db(db.period_year.id==period).select().first()
            else:
                session.flash = T('Not valid Action.')
                redirect(URL('default','index'))
        #check for type of rol selected
        if request.vars['group'] is None:
            None
        else:
            if request.vars['group']!='':
                group = request.vars['group']
                group = db(db.auth_group.id==group).select().first()
            else:
                session.flash = T('Not valid Action.')
                redirect(URL('default','index'))
        # Weighted average per question: sum(grade * count) / sum(count).
        columnTotal=((db.answer.grade * db.evaluation_solve_detail.total_count).sum()/(db.evaluation_solve_detail.total_count.sum())).with_alias('total')
        rows=db((db.auth_membership.user_id == db.auth_user.id)&
            (db.auth_membership.group_id == db.auth_group.id)&
            (db.evaluation_result.evaluated == db.auth_user.id)&
            (db.evaluation_solve_detail.evaluation_result == db.evaluation_result.id)&
            (db.evaluation_solve_detail.repository_answer == db.repository_answer.id)&
            (db.evaluation_solve_detail.question_repository == db.question_repository.id)&
            (db.repository_answer.question_repository == db.question_repository.id)&
            (db.evaluation_result.repository_evaluation == db.repository_evaluation.id)&
            (db.repository_answer.answer == db.answer.answer)&
            (db.repository_evaluation.user_type_evaluated == db.auth_group.id)&
            (db.auth_group.id == group)&
            (db.evaluation_result.period == period)).select(db.question_repository.question,columnTotal,groupby=db.question_repository.id)
        # Overall average plus 0..1-scaled values/labels for the chart.
        suma = Decimal(0)
        promedio = 0
        constantCien = Decimal(100)
        graphVal = Decimal(0)
        resGraph = []
        labelGraph = []
        if rows.first() is not None:
            for row in rows:
                suma = row.total + suma
                graphVal = row.total / constantCien
                resGraph.append("%.3f" % (graphVal))
                labelGraph.append(row.question_repository.question)
            avg = suma / len(rows)
            promedio = int(avg)
            gridAnswer=db(db.answer).select(db.answer.grade, db.answer.answer)
        else:
            # Nothing to show for this group/period combination.
            session.flash =T('Not authorized')
            redirect(URL('default','index'))
    # Thresholds the view uses to color the overall average.
    min_average = 40
    max_average = 60
    return dict(periods=periods,period=period,periodo=period,
                fullname=fullname,rows=rows,promedio=promedio,
                gridAnswer=gridAnswer,resGraph=resGraph,
                min_average=min_average,max_average=max_average,
                group=group,groups=groups,grupo=group)
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360') or auth.has_membership('Ecys-Administrator') )
def search():
    """Administrative listing of evaluated users by period and role group.

    Produces one row per evaluated user of the selected group so the admin
    can drill down into `results`.  Group ids: 2 = student, 3 = teacher,
    6 = ecys-administrator.
    """
    #Import DB
    import cpfecys
    from decimal import Decimal
    if auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360') or auth.has_membership('Ecys-Administrator'):
        #Obtain the current period of the system and all the register periods
        period = cpfecys.current_year_period()
        periods = db(db.period_year).select()
        # 2=student; 3= teacher; 6=ecys-administrator
        group = db((db.auth_group.id == 2)|\
                    (db.auth_group.id == 3)|\
                    (db.auth_group.id == 6)).select(db.auth_group.id).first()
        groups = db((db.auth_group.id == 2)|\
                    (db.auth_group.id == 3)|\
                    (db.auth_group.id == 6)).select()
        #Check if the period has changed
        if request.vars['period'] is None:
            None
        else:
            if request.vars['period']!='':
                period = request.vars['period']
                period = db(db.period_year.id==period).select().first()
            else:
                session.flash = T('Not valid Action.')
                redirect(URL('default','index'))
        #check for type of rol selected
        if request.vars['group'] is None:
            None
        else:
            if request.vars['group']!='':
                group = request.vars['group']
                group = db(db.auth_group.id==group).select().first()
            else:
                session.flash = T('Not valid Action.')
                redirect(URL('default','index'))
        # One row per evaluated user of the group for the period.
        rows=db((db.auth_membership.user_id == db.auth_user.id)&
            (db.auth_membership.group_id == db.auth_group.id)&
            (db.evaluation_result.evaluated == db.auth_user.id)&
            (db.auth_group.id == group)&
            (db.evaluation_result.period == period)).select(db.evaluation_result.period,db.auth_user.id, db.auth_group.id,db.auth_user.username,db.auth_user.username,db.auth_user.first_name,db.auth_user.last_name,groupby=db.auth_user.id)
    else:
        # Defensive; the decorator above already enforces membership.
        session.flash =T('Not authorized')
        redirect(URL('default','index'))
    return dict(periods=periods,period=period,periodo=period,
                rows=rows,group=group,groups=groups,grupo=group)
@auth.requires_login()
@auth.requires(auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360') or auth.has_membership('Ecys-Administrator'))
def graph():
    """Chart data for a role group's averaged evaluation results.

    Same aggregation as `results_search` (count-weighted grade average per
    question), but returns only the chart series: values scaled to 0..1 in
    ``resGraph`` and question texts in ``labelGraph``.
    """
    #Import DB
    import cpfecys
    from decimal import Decimal
    if auth.has_membership('Super-Administrator') or auth.has_membership('Evaluator360') or auth.has_membership('Ecys-Administrator'):
        #Obtain the current period of the system and all the register periods
        period = cpfecys.current_year_period()
        periods = db(db.period_year).select()
        # 2=student; 3= teacher; 6=ecys-administrator
        group = db((db.auth_group.id == 2)|\
                    (db.auth_group.id == 3)|\
                    (db.auth_group.id == 6)).select(db.auth_group.id).first()
        groups = db((db.auth_group.id == 2)|\
                    (db.auth_group.id == 3)|\
                    (db.auth_group.id == 6)).select()
        #Check if the period has changed
        if request.vars['period'] is None:
            None
        else:
            if request.vars['period']!='':
                period = request.vars['period']
                period = db(db.period_year.id==period).select().first()
            else:
                session.flash = T('Not valid Action.')
                redirect(URL('default','index'))
        #check for type of rol selected
        if request.vars['group'] is None:
            None
        else:
            if request.vars['group']!='':
                group = request.vars['group']
                group = db(db.auth_group.id==group).select().first()
            else:
                session.flash = T('Not valid Action.')
                redirect(URL('default','index'))
        # Weighted average per question: sum(grade * count) / sum(count).
        columnTotal=((db.answer.grade * db.evaluation_solve_detail.total_count).sum()/(db.evaluation_solve_detail.total_count.sum())).with_alias('total')
        rows=db((db.auth_membership.user_id == db.auth_user.id)&
            (db.auth_membership.group_id == db.auth_group.id)&
            (db.evaluation_result.evaluated == db.auth_user.id)&
            (db.evaluation_solve_detail.evaluation_result == db.evaluation_result.id)&
            (db.evaluation_solve_detail.repository_answer == db.repository_answer.id)&
            (db.evaluation_solve_detail.question_repository == db.question_repository.id)&
            (db.repository_answer.question_repository == db.question_repository.id)&
            (db.evaluation_result.repository_evaluation == db.repository_evaluation.id)&
            (db.repository_answer.answer == db.answer.answer)&
            (db.repository_evaluation.user_type_evaluated == db.auth_group.id)&
            (db.auth_group.id == group)&
            (db.evaluation_result.period == period)).select(db.question_repository.question,columnTotal,groupby=db.question_repository.id)
        constantCien = Decimal(100)
        graphVal = Decimal(0)
        resGraph = []
        labelGraph = []
        for row in rows:
            # Scale each question's average from 0..100 down to 0..1.
            graphVal = row.total / constantCien
            resGraph.append("%.3f" % (graphVal))
            labelGraph.append(row.question_repository.question)
            pass
        gridAnswer=db(db.answer).select(db.answer.grade, db.answer.answer)
    else:
        # Defensive; the decorator above already enforces membership.
        session.flash =T('Not authorized')
        redirect(URL('default','index'))
    return dict(periods=periods,period=period,periodo=period,
                gridAnswer=gridAnswer,resGraph=resGraph,labelGraph=labelGraph,
                group=group,groups=groups,grupo=group)
|
DaytonGarcia/quizmodule
|
controllers/evaluation.py
|
Python
|
gpl-3.0
| 37,242
|
#!/usr/bin/python
# (c) Nelen & Schuurmans. GPL licensed.
from __future__ import division, print_function
from django.conf import settings
import urlparse
from twitter import *
from django.contrib.gis.geos import Point
from lizard_sticky_twitterized.models import StickyTweet
import locale
from datetime import datetime
from django.utils import timezone
def search_twitter(*args, **options):
    """Query the Twitter search API for *args* and store geo-coded tweets.

    Follows the paginated 'next_results' cursor until exhausted, storing
    each status through TweetWriter, then prunes duplicate status ids.
    The search is restricted to a 160 km radius around (52.09, 5.10) and
    the 100 most recent tweets per page.
    """
    consumer_key = getattr(settings, 'CONSUMER_KEY')
    consumer_secret = getattr(settings, 'CONSUMER_SECRET')
    # NOTE: the settings keys are spelled 'ACCES_*' (sic); keep them in sync
    # with the Django settings module.
    access_token = getattr(settings, 'ACCES_TOKEN')
    access_secret = getattr(settings, 'ACCES_SECRET')
    t = Twitter(auth=OAuth(access_token, access_secret,
                consumer_key, consumer_secret))
    search_params = dict(q=args, count=100, geocode="52.09,5.10,160km",
                         result_type='recent', include_entities='1')
    tweets = t.search.tweets(**search_params)
    while tweets:
        for tweet in tweets.get('statuses'):
            writer = TweetWriter(tweet)
            writer.store()
        next_results = tweets['search_metadata'].get('next_results')
        if next_results:
            # next_results looks like '?max_id=...&q=...'; drop the '?' and
            # continue from the cursor's max_id.
            qs = next_results[1:]
            qs_dict = urlparse.parse_qs(qs, keep_blank_values=True)
            tweets = t.search.tweets(max_id=qs_dict['max_id'][0],
                                     **search_params)
        else:
            tweets = None
    delete_duplicates()
def delete_duplicates():
    """Delete extra copies of tweets sharing a status_id, keeping one each."""
    for tweet in StickyTweet.objects.all():
        duplicates = StickyTweet.objects.filter(status_id=tweet.status_id)
        # While more than one copy remains, this row is surplus; the count
        # reaches 1 for the last remaining copy, which is kept.
        if duplicates.count() > 1:
            tweet.delete()
class TweetWriter():
    """
    Stores the content of a tweet if the tweet contains coordinates.
    Overwrites old tweets when the specified storage limit has been reached (default 3000).
    """
    def __init__(self, tweet, limit=3000):
        # `tweet` is the decoded JSON dict from the Twitter search API;
        # `limit` caps how many StickyTweet rows are kept in total.
        self.tweet = tweet
        self.limit = limit
    def store(self):
        """
        Either stores geo-coded tweets as new entries or overwrites oldest
        """
        tweet = self.tweet
        if tweet.get('coordinates') is not None:
            if self._full():
                # Recycle the oldest stored tweet instead of growing the table.
                self._store_tweet(StickyTweet.objects.order_by('time')[0])
            else:
                self._store_tweet(StickyTweet())
    def _store_tweet(self, new_tweet):
        # Populate (or overwrite) the given model instance and save it.
        tweet = self.tweet
        new_tweet.twitter_name = tweet.get('user').get('screen_name')
        new_tweet.tweet = tweet.get('text')
        new_tweet.status_id = int(tweet.get('id'))
        # GeoJSON coordinate order is (longitude, latitude).
        new_tweet.geom = Point(
            float(tweet.get('coordinates').get('coordinates')[0]),
            float(tweet.get('coordinates').get('coordinates')[1])
        )
        new_tweet.time = self._tweet_time(tweet.get('created_at'))
        try:
            # First attached media item, when present; absent media yields
            # AttributeError/TypeError, which we deliberately ignore.
            new_tweet.media_url = tweet.get('entities').get('media')[0].get(
                'media_url')
        except (AttributeError, TypeError):
            pass
        new_tweet.save()
    def _full(self):
        # True when the table already holds `limit` rows (returns None otherwise).
        limit = self.limit-1
        if StickyTweet.objects.count() > limit:
            return True
    def _tweet_time(self, created_at):
        # Twitter timestamps use English month/day abbreviations, so strptime
        # needs an English locale regardless of the server's default.
        locale.setlocale(locale.LC_TIME, "en_US.utf8")
        time = datetime.strptime(created_at,
                                 '%a %b %d %H:%M:%S +0000 %Y')
        return timezone.make_aware(time, timezone.utc)
|
lizardsystem/lizard-sticky-twitterized
|
lizard_sticky_twitterized/twitter_connector.py
|
Python
|
gpl-3.0
| 3,321
|
#!/usr/bin/env python
# --!-- coding: utf8 --!--
from PyQt5.QtWidgets import QListView
from manuskript import settings
from manuskript.functions import findBackground
from manuskript.ui.views.corkDelegate import corkDelegate
from manuskript.ui.views.dndView import dndView
from manuskript.ui.views.outlineBasics import outlineBasics
class corkView(QListView, dndView, outlineBasics):
    """Corkboard view: outline items rendered as index cards in a wrapping list."""

    def __init__(self, parent=None):
        QListView.__init__(self, parent)
        dndView.__init__(self, parent)
        outlineBasics.__init__(self, parent)
        # Cards re-flow on resize and wrap into multiple rows.
        self.setResizeMode(QListView.Adjust)
        self.setWrapping(True)
        # corkDelegate paints each item as an index card.
        self.setItemDelegate(corkDelegate())
        self.setSpacing(5)
        self.setVerticalScrollMode(self.ScrollPerPixel)
        self.setFlow(self.LeftToRight)
        self.setSelectionBehavior(self.SelectRows)
        self.updateBackground()

    def updateBackground(self):
        """Apply the corkboard color/image from settings via a stylesheet."""
        if settings.corkBackground["image"] != "":
            img = findBackground(settings.corkBackground["image"])
            # findBackground may fail to resolve the file; fall back to none.
            if img == None:
                img = ""
        else:
            # No background image
            img = ""
        # Doubled braces survive str.format and reach Qt's stylesheet parser.
        self.setStyleSheet("""QListView {{
                background:{color};
                background-image: url({url});
                background-attachment: fixed;
                }}""".format(
            color=settings.corkBackground["color"],
            url=img.replace("\\", "/")
        ))

    def dragMoveEvent(self, event):
        # Let both base classes react to the drag.
        dndView.dragMoveEvent(self, event)
        QListView.dragMoveEvent(self, event)

    def mouseReleaseEvent(self, event):
        QListView.mouseReleaseEvent(self, event)
        outlineBasics.mouseReleaseEvent(self, event)

    def mouseDoubleClickEvent(self, event):
        # Double-click on empty space navigates up to the parent item;
        # double-click on a card falls through to the default behavior.
        if self.selectedIndexes() == []:
            idx = self.rootIndex()
            parent = idx.parent()
            from manuskript.functions import MW
            MW.openIndex(parent)
            #self.setRootIndex(parent)
        else:
            r = QListView.mouseDoubleClickEvent(self, event)
gedakc/manuskript
|
manuskript/ui/views/corkView.py
|
Python
|
gpl-3.0
| 2,083
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.commands.runners."""
import pytest
from qutebrowser.commands import runners, cmdexc
class TestCommandRunner:

    """Tests for CommandRunner."""

    def test_parse_all(self, cmdline_test):
        """Test parsing of commands.

        See https://github.com/The-Compiler/qutebrowser/issues/615

        Args:
            cmdline_test: A pytest fixture which provides testcases.
        """
        cr = runners.CommandRunner(0)
        # Valid command lines must parse; invalid ones must raise.
        if cmdline_test.valid:
            list(cr.parse_all(cmdline_test.cmd, aliases=False))
        else:
            with pytest.raises(cmdexc.NoSuchCommandError):
                list(cr.parse_all(cmdline_test.cmd, aliases=False))

    def test_parse_all_with_alias(self, cmdline_test, config_stub):
        """An alias expanding to a (in)valid command behaves like that command."""
        config_stub.data = {'aliases': {'alias_name': cmdline_test.cmd}}

        cr = runners.CommandRunner(0)
        if cmdline_test.valid:
            assert len(list(cr.parse_all("alias_name"))) > 0
        else:
            with pytest.raises(cmdexc.NoSuchCommandError):
                list(cr.parse_all("alias_name"))

    @pytest.mark.parametrize('command', ['', ' '])
    def test_parse_empty_with_alias(self, command):
        """An empty command should not crash.

        See https://github.com/The-Compiler/qutebrowser/issues/1690
        and https://github.com/The-Compiler/qutebrowser/issues/1773
        """
        cr = runners.CommandRunner(0)
        with pytest.raises(cmdexc.NoSuchCommandError):
            list(cr.parse_all(command))

    def test_partial_parsing(self):
        """Test partial parsing with a runner where it's enabled.

        The same with it being disabled is tested by test_parse_all.
        """
        cr = runners.CommandRunner(0, partial_match=True)
        # 'message-i' is an unambiguous prefix of 'message-info'.
        result = cr.parse('message-i')
        assert result.cmd.name == 'message-info'
|
EliteTK/qutebrowser
|
tests/unit/commands/test_runners.py
|
Python
|
gpl-3.0
| 2,651
|
# -*- coding: utf-8 -*-
#
# Copyright © 2013 The Spyder Development Team
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
Rope introspection plugin
"""
import time
from spyderlib import dependencies
from spyderlib.baseconfig import get_conf_path, _, STDERR
from spyderlib.utils import encoding, programs
from spyderlib.py3compat import PY2
from spyderlib.utils.dochelpers import getsignaturefromtext
from spyderlib.utils import sourcecode
from spyderlib.utils.debug import log_last_error, log_dt
from spyderlib.utils.introspection.plugin_manager import (
DEBUG_EDITOR, LOG_FILENAME, IntrospectionPlugin)
try:
try:
from spyderlib import rope_patch
rope_patch.apply()
except ImportError:
# rope 0.9.2/0.9.3 is not installed
pass
import rope.base.libutils
import rope.contrib.codeassist
except ImportError:
pass
# Minimum rope version required for this plugin to load.
ROPE_REQVER = '>=0.9.2'
dependencies.add('rope',
                 _("Editor's code completion, go-to-definition and help"),
                 required_version=ROPE_REQVER)
#TODO: The following preferences should be customizable in the future
# Preferences passed verbatim to rope.base.project.Project.
ROPE_PREFS = {'ignore_syntax_errors': True,
              'ignore_bad_imports': True,
              'soa_followed_calls': 2,
              'extension_modules': [],
              }
class RopePlugin(IntrospectionPlugin):
    """
    Rope based introspection plugin for jedi

    Editor's code completion, go-to-definition and help
    """

    project = None

    # ---- IntrospectionPlugin API --------------------------------------------
    name = 'rope'

    def load_plugin(self):
        """Load the Rope introspection plugin.

        Raises ImportError when the installed rope does not satisfy
        ROPE_REQVER; otherwise creates the rope project in the config dir.
        """
        if not programs.is_module_installed('rope', ROPE_REQVER):
            raise ImportError('Requires Rope %s' % ROPE_REQVER)
        self.project = None
        self.create_rope_project(root_path=get_conf_path())

    def get_completions(self, info):
        """Get a list of completions using Rope.

        Returns proposal names with every underscored name dropped and a
        hard-coded set of domain-specific names filtered out, or None when
        no project is loaded or rope fails.
        """
        if self.project is None:
            return
        filename = info.filename
        source_code = info.source_code
        offset = info.position
        if PY2:
            filename = filename.encode('utf-8')
        else:
            # TODO: test if this is working without any further change in
            # Python 3 with a user account containing unicode characters
            pass
        try:
            resource = rope.base.libutils.path_to_resource(self.project,
                                                           filename)
        except Exception as _error:
            if DEBUG_EDITOR:
                log_last_error(LOG_FILENAME, "path_to_resource: %r" % filename)
            resource = None
        try:
            if DEBUG_EDITOR:
                t0 = time.time()
            proposals = rope.contrib.codeassist.code_assist(self.project,
                                    source_code, offset, resource, maxfixes=3)
            proposals = rope.contrib.codeassist.sorted_proposals(proposals)
            if DEBUG_EDITOR:
                log_dt(LOG_FILENAME, "code_assist/sorted_proposals", t0)
            # Keep only "public-looking" names: anything containing an
            # underscore (private, dunder, snake_case) is dropped.
            names = [proposal.name for proposal in proposals
                     if '_' not in proposal.name]
            # Domain-specific names that must never be offered as completions
            # (kept from the original hand-maintained block list).
            excluded = {"GetLocationOfAndroidImage",
                        "GetLocationOfImage",
                        "GetRect",
                        'c()',
                        'c',
                        "Realc",
                        "Realr",
                        "Show",
                        "ShowAndroid",
                        "StartMonitorApp",
                        "Tap",
                        "TapOld",
                        "r",
                        "t",
                        "Application",
                        "Cycles",
                        "DisplayName",
                        "ExcludedWindowClassList",
                        "FriendNameOfButton",
                        "FullButtonPathName",
                        "ImageOfButton",
                        "MaxVal",
                        "NameOfButton",
                        "NameOfButtonCapitalized",
                        "Threshold",
                        "name"}
            return [name for name in names if name not in excluded]
        except Exception as _error:  # analysis:ignore
            if DEBUG_EDITOR:
                log_last_error(LOG_FILENAME, "get_completion_list")

    def get_info(self, info):
        """Get a formatted calltip and docstring from Rope.

        Returns the dict built by handle_info, or None when no project is
        loaded or rope fails.
        """
        if self.project is None:
            return
        filename = info.filename
        source_code = info.source_code
        offset = info.position
        if PY2:
            filename = filename.encode('utf-8')
        else:
            #TODO: test if this is working without any further change in
            # Python 3 with a user account containing unicode characters
            pass
        try:
            resource = rope.base.libutils.path_to_resource(self.project,
                                                           filename)
        except Exception as _error:
            if DEBUG_EDITOR:
                log_last_error(LOG_FILENAME, "path_to_resource: %r" % filename)
            resource = None
        try:
            if DEBUG_EDITOR:
                t0 = time.time()
            cts = rope.contrib.codeassist.get_calltip(
                            self.project, source_code, offset, resource,
                            ignore_unknown=False, remove_self=True, maxfixes=3)
            if DEBUG_EDITOR:
                log_dt(LOG_FILENAME, "get_calltip", t0)
            if cts is not None:
                # Normalize rope's output: collapse repeated dots and make
                # unknown-argument placeholders render as "(...)".
                while '..' in cts:
                    cts = cts.replace('..', '.')
                if '(.)' in cts:
                    cts = cts.replace('(.)', '(...)')
            try:
                doc_text = rope.contrib.codeassist.get_doc(self.project,
                                     source_code, offset, resource, maxfixes=3)
                if DEBUG_EDITOR:
                    log_dt(LOG_FILENAME, "get_doc", t0)
            except Exception as _error:
                doc_text = ''
                if DEBUG_EDITOR:
                    log_last_error(LOG_FILENAME, "get_doc")
            return self.handle_info(cts, doc_text, source_code, offset)
        except Exception as _error:  #analysis:ignore
            if DEBUG_EDITOR:
                log_last_error(LOG_FILENAME, "get_calltip_text")

    def handle_info(self, cts, doc_text, source_code, offset):
        """Build the info dict (name/argspec/note/docstring/calltip)
        from the raw calltip and doc text returned by rope."""
        obj_fullname = ''
        calltip = ''
        argspec = ''
        note = ''
        if cts:
            cts = cts.replace('.__init__', '')
            parpos = cts.find('(')
            if parpos:
                obj_fullname = cts[:parpos]
                obj_name = obj_fullname.split('.')[-1]
                cts = cts.replace(obj_fullname, obj_name)
                calltip = cts
                if ('()' in cts) or ('(...)' in cts):
                    # Either inspected object has no argument, or it's
                    # a builtin or an extension -- in this last case
                    # the following attempt may succeed:
                    calltip = getsignaturefromtext(doc_text, obj_name)
        if not obj_fullname:
            obj_fullname = sourcecode.get_primary_at(source_code, offset)
        if obj_fullname and not obj_fullname.startswith('self.'):
            # doc_text was generated by utils.dochelpers.getdoc
            if type(doc_text) is dict:
                obj_fullname = doc_text['name'] or obj_fullname
                argspec = doc_text['argspec']
                note = doc_text['note']
                doc_text = doc_text['docstring']
            elif calltip:
                argspec_st = calltip.find('(')
                argspec = calltip[argspec_st:]
                module_end = obj_fullname.rfind('.')
                module = obj_fullname[:module_end]
                note = 'Present in %s module' % module
        return dict(name=obj_fullname, argspec=argspec, note=note,
                    docstring=doc_text, calltip=calltip)

    def get_definition(self, info):
        """Find a definition location using Rope.

        Returns a (filename, lineno) tuple, or None on failure.
        """
        if self.project is None:
            return
        filename = info.filename
        source_code = info.source_code
        offset = info.position
        if PY2:
            filename = filename.encode('utf-8')
        else:
            #TODO: test if this is working without any further change in
            # Python 3 with a user account containing unicode characters
            pass
        try:
            resource = rope.base.libutils.path_to_resource(self.project,
                                                           filename)
        except Exception as _error:
            if DEBUG_EDITOR:
                log_last_error(LOG_FILENAME, "path_to_resource: %r" % filename)
            resource = None
        try:
            if DEBUG_EDITOR:
                t0 = time.time()
            resource, lineno = rope.contrib.codeassist.get_definition_location(
                    self.project, source_code, offset, resource, maxfixes=3)
            if DEBUG_EDITOR:
                log_dt(LOG_FILENAME, "get_definition_location", t0)
            if resource is not None:
                filename = resource.real_path
            if filename and lineno:
                return filename, lineno
        except Exception as _error:  #analysis:ignore
            if DEBUG_EDITOR:
                log_last_error(LOG_FILENAME, "get_definition_location")

    def validate(self):
        """Validate the Rope project"""
        if self.project is not None:
            self.project.validate(self.project.root)

    def set_pref(self, key, value):
        """Set a Rope preference"""
        if self.project is not None:
            self.project.prefs.set(key, value)

    # ---- Private API -------------------------------------------------------
    def create_rope_project(self, root_path):
        """Create a Rope project on a desired path.

        On failure, self.project is left as None and the error is logged
        when DEBUG_EDITOR is enabled.
        """
        if PY2:
            root_path = encoding.to_fs_from_unicode(root_path)
        else:
            #TODO: test if this is working without any further change in
            # Python 3 with a user account containing unicode characters
            pass
        try:
            import rope.base.project
            self.project = rope.base.project.Project(root_path, **ROPE_PREFS)
        except ImportError:
            # NOTE: was a Python-2-only ``print >>STDERR`` statement, which
            # breaks on Python 3; write to the stream directly instead.
            STDERR.write('project error\n')
            self.project = None
            if DEBUG_EDITOR:
                log_last_error(LOG_FILENAME,
                               "create_rope_project: %r" % root_path)
        except TypeError:
            # Compatibility with new Mercurial API (>= 1.3).
            # New versions of rope (> 0.9.2) already handle this issue
            self.project = None
            if DEBUG_EDITOR:
                log_last_error(LOG_FILENAME,
                               "create_rope_project: %r" % root_path)
        self.validate()

    def close_rope_project(self):
        """Close the Rope project"""
        if self.project is not None:
            self.project.close()
if __name__ == '__main__':
    # Smoke test: requires rope, numpy and matplotlib to be importable.
    from spyderlib.utils.introspection.plugin_manager import CodeInfo
    p = RopePlugin()
    p.load_plugin()
    # Calltip/docstring lookup on a library function.
    source_code = "import numpy; numpy.ones"
    docs = p.get_info(CodeInfo('info', source_code, len(source_code),
                               __file__))
    assert 'ones(' in docs['calltip'] and 'ones(' in docs['docstring']
    # Completion of a module name.
    source_code = "import numpy; n"
    completions = p.get_completions(CodeInfo('completions', source_code,
                                             len(source_code), __file__))
    assert 'numpy' in completions
    # Go-to-definition into an installed package.
    source_code = "import matplotlib.pyplot as plt; plt.imsave"
    path, line_nr = p.get_definition(CodeInfo('definition', source_code,
                                              len(source_code), __file__))
    assert 'pyplot.py' in path
    # Definition and docstring lookup within an unsaved buffer.
    code = '''
def test(a, b):
    """Test docstring"""
    pass
test(1,'''
    path, line = p.get_definition(CodeInfo('definition', code, len(code),
                                           'dummy.txt'))
    assert line == 2
    docs = p.get_info(CodeInfo('info', code, len(code), __file__))
    assert 'Test docstring' in docs['docstring']
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/spyderlib/utils/introspection/rope_plugin.py
|
Python
|
gpl-3.0
| 13,334
|
# -*- coding: utf-8 -*-
import io
import logging
import re
from babelfish import Language, language_converters
from guessit import guessit
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from requests import Session
from zipfile import ZipFile
from . import Provider
from .. import __short_version__
from ..exceptions import ProviderError
from ..subtitle import Subtitle, fix_line_ending, guess_matches
from ..utils import sanitize
from ..video import Episode, Movie
logger = logging.getLogger(__name__)  # module-level logger for this provider
class PodnapisiSubtitle(Subtitle):
    """Podnapisi Subtitle."""
    provider_name = 'podnapisi'
    def __init__(self, language, hearing_impaired, page_link, pid, releases, title, season=None, episode=None,
                 year=None):
        super(PodnapisiSubtitle, self).__init__(language, hearing_impaired, page_link)
        # pid: podnapisi's subtitle identifier; also used as Subtitle.id.
        self.pid = pid
        # releases: list of release-name strings used for guess matching.
        self.releases = releases
        self.title = title
        self.season = season
        self.episode = episode
        self.year = year
    @property
    def id(self):
        """Unique id of this subtitle: the podnapisi pid."""
        return self.pid
    def get_matches(self, video):
        """Return the set of match names between this subtitle and *video*."""
        matches = set()
        # episode
        if isinstance(video, Episode):
            # series
            if video.series and sanitize(self.title) == sanitize(video.series):
                matches.add('series')
            # year
            # NOTE(review): parses as (original_series and year is None) or
            # (year and year == self.year) due to and/or precedence — verify
            # that is the intended grouping upstream.
            if video.original_series and self.year is None or video.year and video.year == self.year:
                matches.add('year')
            # season
            if video.season and self.season == video.season:
                matches.add('season')
            # episode
            if video.episode and self.episode == video.episode:
                matches.add('episode')
            # guess
            for release in self.releases:
                matches |= guess_matches(video, guessit(release, {'type': 'episode'}))
        # movie
        elif isinstance(video, Movie):
            # title
            if video.title and sanitize(self.title) == sanitize(video.title):
                matches.add('title')
            # year
            if video.year and self.year == video.year:
                matches.add('year')
            # guess
            for release in self.releases:
                matches |= guess_matches(video, guessit(release, {'type': 'movie'}))
        return matches
class PodnapisiProvider(Provider):
    """Podnapisi Provider."""
    languages = ({Language('por', 'BR'), Language('srp', script='Latn')} |
                 {Language.fromalpha2(l) for l in language_converters['alpha2'].codes})
    server_url = 'http://podnapisi.eu/subtitles/'
    def initialize(self):
        """Create the HTTP session used for all requests."""
        self.session = Session()
        self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__
    def terminate(self):
        """Close the HTTP session."""
        self.session.close()
    def query(self, language, keyword, season=None, episode=None, year=None):
        """Search podnapisi's XML endpoint and return PodnapisiSubtitle objects.

        Paginates until the last page; duplicate pids are skipped.
        """
        # set parameters, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164#p212652
        params = {'sXML': 1, 'sL': str(language), 'sK': keyword}
        is_episode = False
        if season and episode:
            is_episode = True
            params['sTS'] = season
            params['sTE'] = episode
        if year:
            params['sY'] = year
        # loop over paginated results
        logger.info('Searching subtitles %r', params)
        subtitles = []
        pids = set()
        while True:
            # query the server
            xml = etree.fromstring(self.session.get(self.server_url + 'search/old', params=params, timeout=10).content)
            # exit if no results
            if not int(xml.find('pagination/results').text):
                logger.debug('No subtitles found')
                break
            # loop over subtitles
            for subtitle_xml in xml.findall('subtitle'):
                # read xml elements
                language = Language.fromietf(subtitle_xml.find('language').text)
                # NOTE(review): the 'n' flag is treated as hearing impaired
                # here — confirm against podnapisi's flag documentation.
                hearing_impaired = 'n' in (subtitle_xml.find('flags').text or '')
                page_link = subtitle_xml.find('url').text
                pid = subtitle_xml.find('pid').text
                releases = []
                if subtitle_xml.find('release').text:
                    for release in subtitle_xml.find('release').text.split():
                        release = re.sub(r'\.+$', '', release)  # remove trailing dots
                        release = ''.join(filter(lambda x: ord(x) < 128, release))  # remove non-ascii characters
                        releases.append(release)
                title = subtitle_xml.find('title').text
                season = int(subtitle_xml.find('tvSeason').text)
                episode = int(subtitle_xml.find('tvEpisode').text)
                year = int(subtitle_xml.find('year').text)
                if is_episode:
                    subtitle = PodnapisiSubtitle(language, hearing_impaired, page_link, pid, releases, title,
                                                 season=season, episode=episode, year=year)
                else:
                    subtitle = PodnapisiSubtitle(language, hearing_impaired, page_link, pid, releases, title,
                                                 year=year)
                # ignore duplicates, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164&start=10#p213321
                if pid in pids:
                    continue
                logger.debug('Found subtitle %r', subtitle)
                subtitles.append(subtitle)
                pids.add(pid)
            # stop on last page
            if int(xml.find('pagination/current').text) >= int(xml.find('pagination/count').text):
                break
            # increment current page
            params['page'] = int(xml.find('pagination/current').text) + 1
            logger.debug('Getting page %d', params['page'])
        return subtitles
    def list_subtitles(self, video, languages):
        """Query once per requested language, keyed by series or movie title."""
        if isinstance(video, Episode):
            return [s for l in languages for s in self.query(l, video.series, season=video.season,
                                                             episode=video.episode, year=video.year)]
        elif isinstance(video, Movie):
            return [s for l in languages for s in self.query(l, video.title, year=video.year)]
    def download_subtitle(self, subtitle):
        """Download the subtitle as a single-file zip and store its content."""
        # download as a zip
        logger.info('Downloading subtitle %r', subtitle)
        r = self.session.get(self.server_url + subtitle.pid + '/download', params={'container': 'zip'}, timeout=10)
        r.raise_for_status()
        # open the zip
        with ZipFile(io.BytesIO(r.content)) as zf:
            if len(zf.namelist()) > 1:
                raise ProviderError('More than one file to unzip')
            subtitle.content = fix_line_ending(zf.read(zf.namelist()[0]))
|
FireBladeNooT/Medusa_1_6
|
lib/subliminal/providers/podnapisi.py
|
Python
|
gpl-3.0
| 6,970
|
import unittest
from doctest import DocTestSuite
from test import support
import weakref
import gc
# Modules under test
_thread = support.import_module('_thread')
threading = support.import_module('threading')
import _threading_local
class Weak(object):
    """Trivial object type that can carry weak references."""
    pass


def target(local, weaklist):
    """Store a fresh Weak instance on *local* and record a weakref to it."""
    obj = Weak()
    local.weak = obj
    weaklist.append(weakref.ref(obj))
class BaseLocalTest:
def test_local_refs(self):
self._local_refs(20)
self._local_refs(50)
self._local_refs(100)
def _local_refs(self, n):
local = self._local()
weaklist = []
for i in range(n):
t = threading.Thread(target=target, args=(local, weaklist))
t.start()
t.join()
del t
gc.collect()
self.assertEqual(len(weaklist), n)
# XXX _threading_local keeps the local of the last stopped thread alive.
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n))
# Assignment to the same thread local frees it sometimes (!)
local.someothervar = None
gc.collect()
deadlist = [weak for weak in weaklist if weak() is None]
self.assertTrue(len(deadlist) in (n-1, n), (n, len(deadlist)))
def test_derived(self):
# Issue 3088: if there is a threads switch inside the __init__
# of a threading.local derived class, the per-thread dictionary
# is created but not correctly set on the object.
# The first member set may be bogus.
import time
class Local(self._local):
def __init__(self):
time.sleep(0.01)
local = Local()
def f(i):
local.x = i
# Simply check that the variable is correctly set
self.assertEqual(local.x, i)
threads= []
for i in range(10):
t = threading.Thread(target=f, args=(i,))
t.start()
threads.append(t)
for t in threads:
t.join()
def test_derived_cycle_dealloc(self):
# http://bugs.python.org/issue6990
class Local(self._local):
pass
locals = None
passed = False
e1 = threading.Event()
e2 = threading.Event()
def f():
nonlocal passed
# 1) Involve Local in a cycle
cycle = [Local()]
cycle.append(cycle)
cycle[0].foo = 'bar'
# 2) GC the cycle (triggers threadmodule.c::local_clear
# before local_dealloc)
del cycle
gc.collect()
e1.set()
e2.wait()
# 4) New Locals should be empty
passed = all(not hasattr(local, 'foo') for local in locals)
t = threading.Thread(target=f)
t.start()
e1.wait()
# 3) New Locals should recycle the original's address. Creating
# them in the thread overwrites the thread state and avoids the
# bug
locals = [Local() for i in range(10)]
e2.set()
t.join()
self.assertTrue(passed)
def _test_one_class(self, c):
self._failed = "No error message set or cleared."
obj = c()
e1 = threading.Event()
e2 = threading.Event()
def f1():
obj.x = 'foo'
obj.y = 'bar'
del obj.y
e1.set()
e2.wait()
def f2():
try:
foo = obj.x
except AttributeError:
# This is expected -- we haven't set obj.x in this thread yet!
self._failed = "" # passed
else:
self._failed = ('Incorrectly got value %r from class %r\n' %
(foo, c))
sys.stderr.write(self._failed)
t1 = threading.Thread(target=f1)
t1.start()
e1.wait()
t2 = threading.Thread(target=f2)
t2.start()
t2.join()
# The test is done; just let t1 know it can exit, and wait for it.
e2.set()
t1.join()
self.assertFalse(self._failed, self._failed)
def test_threading_local(self):
self._test_one_class(self._local)
def test_threading_local_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_one_class(LocalSubclass)
def _test_dict_attribute(self, cls):
obj = cls()
obj.x = 5
self.assertEqual(obj.__dict__, {'x': 5})
with self.assertRaises(AttributeError):
obj.__dict__ = {}
with self.assertRaises(AttributeError):
del obj.__dict__
def test_dict_attribute(self):
self._test_dict_attribute(self._local)
def test_dict_attribute_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_dict_attribute(LocalSubclass)
class ThreadLocalTest(unittest.TestCase, BaseLocalTest):
    # Runs the shared suite against the C implementation.
    _local = _thread._local
    # Fails for the pure Python implementation
    def test_cycle_collection(self):
        """A cycle through a local's attribute must still be collectable."""
        class X:
            pass
        x = X()
        x.local = self._local()
        x.local.x = x
        wr = weakref.ref(x)
        del x
        gc.collect()
        self.assertIs(wr(), None)
class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
    # Runs the shared suite against the pure-Python implementation.
    _local = _threading_local.local
def test_main():
    """Run the module doctests plus both TestCase suites, then run the
    doctests again with the C local temporarily swapped in."""
    suite = unittest.TestSuite()
    suite.addTest(DocTestSuite('_threading_local'))
    suite.addTest(unittest.makeSuite(ThreadLocalTest))
    suite.addTest(unittest.makeSuite(PyThreadingLocalTest))
    original_local = _threading_local.local

    def _swap_in_c_local(test):
        _threading_local.local = _thread._local

    def _restore_local(test):
        _threading_local.local = original_local

    suite.addTest(DocTestSuite('_threading_local',
                               setUp=_swap_in_c_local,
                               tearDown=_restore_local))
    support.run_unittest(suite)

if __name__ == '__main__':
    test_main()
|
mancoast/CPythonPyc_test
|
fail/314_test_threading_local.py
|
Python
|
gpl-3.0
| 6,080
|
from __future__ import unicode_literals
import datetime
from django import VERSION
try:
from django.contrib.auth import get_user_model # Django 1.5
except ImportError:
from postman.future_1_5 import get_user_model
from django.http import QueryDict
from django.template import Node
from django.template import TemplateSyntaxError
from django.template import Library
from django.template.defaultfilters import date
from django.utils.translation import ugettext_lazy as _
from postman.models import (
ORDER_BY_KEY,
ORDER_BY_MAPPER,
Message,
get_user_representation,
)
register = Library()  # template tag/filter registry for this module
##########
# filters
##########
@register.filter
def sub(value, arg):
    """Subtract the arg from the value; return value unchanged if either
    operand cannot be converted to int."""
    try:
        minuend, subtrahend = int(value), int(arg)
    except (ValueError, TypeError):
        return value
    return minuend - subtrahend
sub.is_safe = True
@register.filter
def or_me(value, arg):
    """
    Replace the value by a fixed pattern, if it equals the argument.
    Typical usage: message.obfuscated_sender|or_me:user
    """
    user_model = get_user_model()
    # NOTE(review): `unicode` only exists on Python 2; this filter raises
    # NameError on Python 3 — confirm the supported Python versions.
    if not isinstance(value, (unicode, str)):
        # Normalize a user object to its display representation.
        value = (get_user_representation if isinstance(value, user_model) else unicode)(
            value
        )
    if not isinstance(arg, (unicode, str)):
        arg = (get_user_representation if isinstance(arg, user_model) else unicode)(arg)
    return _("<me>") if value == arg else value
@register.filter(
    **({"expects_localtime": True, "is_safe": False} if VERSION >= (1, 4) else {})
)
def compact_date(value, arg):
    """
    Output a date as short as possible.

    The argument must provide 3 patterns: for same day, for same year, otherwise
    Typical usage: |compact_date:_("G:i,j b,j/n/y")
    """
    patterns = arg.split(",")
    if len(patterns) < 3:
        return value  # Invalid arg.
    today = datetime.date.today()
    if value.date() == today:
        fmt = patterns[0]
    elif value.year == today.year:
        fmt = patterns[1]
    else:
        fmt = patterns[2]
    return date(value, fmt)
#######
# tags
#######
class OrderByNode(Node):
    "For use in the postman_order_by tag"

    def __init__(self, code):
        self.code = code

    def render(self, context):
        """
        Return a formatted GET query string, as "?order_key=order_val".

        Preserves existing GET's keys, if any, such as a page number.
        For that, the view has to provide request.GET in a 'gets' entry of the context.
        """
        gets = context["gets"].copy() if "gets" in context else QueryDict("").copy()
        current = gets.pop(ORDER_BY_KEY)[0] if ORDER_BY_KEY in gets else None
        if self.code:
            # Selecting the already-active key toggles to the reversed
            # (uppercase) ordering code.
            gets[ORDER_BY_KEY] = self.code.upper() if self.code == current else self.code
        if not gets:
            return ""
        return "?" + gets.urlencode()
class InboxCountNode(Node):
    "For use in the postman_unread tag"
    def __init__(self, asvar=None):
        # asvar: optional context variable name to store the count in.
        self.asvar = asvar
    def render(self, context):
        """
        Return the count of unread messages for the user found in context,
        (may be 0) or an empty string.
        """
        try:
            user = context["user"]
            # NOTE(review): is_anonymous() is called as a method — this is
            # the pre-1.10 Django API; it became a property later.
            if user.is_anonymous():
                count = ""
            else:
                count = Message.objects.inbox_unread_count(user)
        except (KeyError, AttributeError):
            # No user in context, or an incompatible user object: stay silent.
            count = ""
        if self.asvar:
            # Store in the context instead of rendering inline.
            context[self.asvar] = count
            return ""
        return count
@register.tag
def postman_order_by(parser, token):
    """
    Compose a query string to ask for a specific ordering in messages list.

    The unique argument must be one of the keywords of a set defined in the model.
    Example::

        <a href="{% postman_order_by subject %}">...</a>
    """
    bits = token.split_contents()
    if len(bits) != 2:
        raise TemplateSyntaxError(
            "'{0}' tag requires a single argument".format(token.contents.split()[0])
        )
    tag_name, field_name = bits
    try:
        field_code = ORDER_BY_MAPPER[field_name.lower()]
    except KeyError:
        raise TemplateSyntaxError(
            "'{0}' is not a valid argument to '{1}' tag."
            " Must be one of: {2}".format(field_name, tag_name, ORDER_BY_MAPPER.keys())
        )
    return OrderByNode(field_code)
@register.tag
def postman_unread(parser, token):
    """
    Give the number of unread messages for a user,
    or nothing (an empty string) for an anonymous user.

    Storing the count in a variable for further processing is advised, such as::

        {% postman_unread as unread_count %}
        ...
        {% if unread_count %}
            You have <strong>{{ unread_count }}</strong> unread messages.
        {% endif %}
    """
    bits = token.split_contents()
    if len(bits) == 1:
        return InboxCountNode()
    if len(bits) != 3:
        raise TemplateSyntaxError(
            "'{0}' tag takes no argument or exactly two arguments".format(bits[0])
        )
    if bits[1] != "as":
        raise TemplateSyntaxError(
            "First argument to '{0}' tag must be 'as'".format(bits[0])
        )
    return InboxCountNode(bits[2])
|
hzlf/openbroadcast.org
|
website/tools/postman/templatetags/postman_tags.py
|
Python
|
gpl-3.0
| 5,258
|
# coding: utf-8
from __future__ import absolute_import
from .base_model_ import Model
from datetime import date, datetime
from typing import List, Dict
from ..util import deserialize_model
class Order(Model):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, id: int=None, pet_id: int=None, quantity: int=None, ship_date: datetime=None, status: str=None, complete: bool=False):
        """
        Order - a model defined in Swagger

        :param id: The id of this Order.
        :type id: int
        :param pet_id: The pet_id of this Order.
        :type pet_id: int
        :param quantity: The quantity of this Order.
        :type quantity: int
        :param ship_date: The ship_date of this Order.
        :type ship_date: datetime
        :param status: The status of this Order.
        :type status: str
        :param complete: The complete of this Order.
        :type complete: bool
        """
        # Python attribute name -> Python type, used for (de)serialization.
        self.swagger_types = {
            'id': int,
            'pet_id': int,
            'quantity': int,
            'ship_date': datetime,
            'status': str,
            'complete': bool
        }
        # Python attribute name -> JSON wire name.
        self.attribute_map = {
            'id': 'id',
            'pet_id': 'petId',
            'quantity': 'quantity',
            'ship_date': 'shipDate',
            'status': 'status',
            'complete': 'complete'
        }
        # NOTE: assigning the private fields directly bypasses the property
        # setters, so constructor arguments are not validated here.
        self._id = id
        self._pet_id = pet_id
        self._quantity = quantity
        self._ship_date = ship_date
        self._status = status
        self._complete = complete
    @classmethod
    def from_dict(cls, dikt) -> 'Order':
        """
        Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The Order of this Order.
        :rtype: Order
        """
        return deserialize_model(dikt, cls)
    @property
    def id(self) -> int:
        """
        Gets the id of this Order.

        :return: The id of this Order.
        :rtype: int
        """
        return self._id
    @id.setter
    def id(self, id: int):
        """
        Sets the id of this Order.

        :param id: The id of this Order.
        :type id: int
        """
        self._id = id
    @property
    def pet_id(self) -> int:
        """
        Gets the pet_id of this Order.

        :return: The pet_id of this Order.
        :rtype: int
        """
        return self._pet_id
    @pet_id.setter
    def pet_id(self, pet_id: int):
        """
        Sets the pet_id of this Order.

        :param pet_id: The pet_id of this Order.
        :type pet_id: int
        """
        self._pet_id = pet_id
    @property
    def quantity(self) -> int:
        """
        Gets the quantity of this Order.

        :return: The quantity of this Order.
        :rtype: int
        """
        return self._quantity
    @quantity.setter
    def quantity(self, quantity: int):
        """
        Sets the quantity of this Order.

        :param quantity: The quantity of this Order.
        :type quantity: int
        """
        self._quantity = quantity
    @property
    def ship_date(self) -> datetime:
        """
        Gets the ship_date of this Order.

        :return: The ship_date of this Order.
        :rtype: datetime
        """
        return self._ship_date
    @ship_date.setter
    def ship_date(self, ship_date: datetime):
        """
        Sets the ship_date of this Order.

        :param ship_date: The ship_date of this Order.
        :type ship_date: datetime
        """
        self._ship_date = ship_date
    @property
    def status(self) -> str:
        """
        Gets the status of this Order.
        Order Status

        :return: The status of this Order.
        :rtype: str
        """
        return self._status
    @status.setter
    def status(self, status: str):
        """
        Sets the status of this Order.
        Order Status

        :param status: The status of this Order.
        :type status: str
        """
        # Only the setter validates; values set via __init__ skip this check.
        allowed_values = ["placed", "approved", "delivered"]
        if status not in allowed_values:
            raise ValueError(
                "Invalid value for `status` ({0}), must be one of {1}"
                .format(status, allowed_values)
            )
        self._status = status
    @property
    def complete(self) -> bool:
        """
        Gets the complete of this Order.

        :return: The complete of this Order.
        :rtype: bool
        """
        return self._complete
    @complete.setter
    def complete(self, complete: bool):
        """
        Sets the complete of this Order.

        :param complete: The complete of this Order.
        :type complete: bool
        """
        self._complete = complete
|
christophelec/test-repo
|
app/swaggerservernew/models/order.py
|
Python
|
gpl-3.0
| 4,844
|
#!/usr/bin/env python3
#
# Misc: Uncategorized checks that might be moved to some better addon later
#
# Example usage of this addon (scan a sourcefile main.cpp)
# cppcheck --dump main.cpp
# python misc.py main.cpp.dump
import cppcheckdata
import sys
import re
# Command-line switches: -debug also reports findings with id 'debug';
# -verify runs the addon in self-test mode against expectations embedded
# in the scanned source's comments.
DEBUG = ('-debug' in sys.argv)
VERIFY = ('-verify' in sys.argv)
# Populated only in -verify mode: expected "line:id" strings parsed from
# comments, and the findings actually produced by the checks.
VERIFY_EXPECTED = []
VERIFY_ACTUAL = []
def reportError(token, severity, msg, id):
    """Report a finding via cppcheckdata, or record it when -verify is on.

    Findings with id 'debug' are suppressed unless -debug was given.
    """
    if id == 'debug' and not DEBUG:
        return
    if VERIFY:
        VERIFY_ACTUAL.append('%s:%s' % (token.linenr, id))
    else:
        cppcheckdata.reportError(token, severity, msg, 'misc', id)
def simpleMatch(token, pattern):
    """Return True when the token chain starting at *token* matches the
    space-separated *pattern* word for word."""
    current = token
    for word in pattern.split(' '):
        if not current or current.str != word:
            return False
        current = current.next
    return True
# Get function arguments
def getArgumentsRecursive(tok, arguments):
    """Flatten a comma-separated argument subtree into *arguments*,
    preserving left-to-right order."""
    if tok is None:
        return
    if tok.str != ',':
        arguments.append(tok)
        return
    getArgumentsRecursive(tok.astOperand1, arguments)
    getArgumentsRecursive(tok.astOperand2, arguments)
def getArguments(ftok):
    """Return the flat list of argument tokens of the call rooted at *ftok*
    (a '(' token whose astOperand2 holds the comma-separated arguments)."""
    arguments = []
    getArgumentsRecursive(ftok.astOperand2, arguments)
    return arguments
def isStringLiteral(tokenString):
    """Return True when the raw token text is a double-quoted string literal."""
    return tokenString[:1] == '"'
# check data
def stringConcatInArrayInit(data):
    """Warn about adjacent string literals inside an array initializer,
    which usually indicate a missing comma."""
    # Get all string macros
    stringMacros = []
    for cfg in data.iterconfigurations():
        for directive in cfg.directives:
            # A #define whose replacement starts with a string literal.
            res = re.match(r'#define[ ]+([A-Za-z0-9_]+)[ ]+".*', directive.str)
            if res:
                macroName = res.group(1)
                if macroName not in stringMacros:
                    stringMacros.append(macroName)
    # Check code
    arrayInit = False
    for i in range(len(data.rawTokens)):
        if i < 2:
            continue
        # Sliding window of three consecutive raw tokens.
        tok1 = data.rawTokens[i-2].str
        tok2 = data.rawTokens[i-1].str
        tok3 = data.rawTokens[i-0].str
        if tok3 == '}':
            arrayInit = False
        elif tok1 == ']' and tok2 == '=' and tok3 == '{':
            # "...] = {" opens an array initializer.
            arrayInit = True
        elif arrayInit and (tok1 in [',', '{']):
            # Two string-valued tokens in a row right after ',' or '{'.
            isString2 = (isStringLiteral(tok2) or (tok2 in stringMacros))
            isString3 = (isStringLiteral(tok3) or (tok3 in stringMacros))
            if isString2 and isString3:
                reportError(data.rawTokens[i], 'style', 'String concatenation in array initialization, missing comma?', 'stringConcatInArrayInit')
def implicitlyVirtual(data):
    """Warn about functions that override a base-class function without an
    explicit 'virtual' keyword."""
    for cfg in data.iterconfigurations():
        for function in cfg.functions:
            # Skip when the attribute is unknown (None) or explicitly False;
            # both are falsy, so one check covers the two original guards.
            if not function.isImplicitlyVirtual:
                continue
            reportError(function.tokenDef, 'style', 'Function \'' + function.name + '\' overrides base class function but is not marked with \'virtual\' keyword.', 'implicitlyVirtual')
def ellipsisStructArg(data):
    """Warn when a record (struct/class) or container object is passed
    by value through the '...' of a variadic function.
    """
    for cfg in data.iterconfigurations():
        for tok in cfg.tokenlist:
            # Only function-call '(' AST nodes with 2+ arguments are of
            # interest (astOperand2 is then the ',' separator node).
            if tok.str != '(':
                continue
            if tok.astOperand1 is None or tok.astOperand2 is None:
                continue
            if tok.astOperand2.str != ',':
                continue
            # Skip non-executable scopes (declarations etc.).
            if tok.scope.type in ['Global', 'Class']:
                continue
            if tok.astOperand1.function is None:
                continue
            for argnr, argvar in tok.astOperand1.function.argument.items():
                if argnr < 1:
                    continue
                # Only the '...' parameter of a variadic function matters.
                if not simpleMatch(argvar.typeStartToken, '...'):
                    continue
                callArgs = getArguments(tok)
                # Check every call argument that maps onto the ellipsis.
                for i in range(argnr-1, len(callArgs)):
                    valueType = callArgs[i].valueType
                    if valueType is None:
                        # Type unknown: reconstruct the argument's source
                        # text (walking token links to skip nested parens)
                        # purely for the debug bailout message.
                        argStart = callArgs[i].previous
                        while argStart.str != ',':
                            if argStart.str == ')':
                                argStart = argStart.link
                            argStart = argStart.previous
                        argEnd = callArgs[i]
                        while argEnd.str != ',' and argEnd.str != ')':
                            if argEnd.str == '(':
                                argEnd = argEnd.link
                            argEnd = argEnd.next
                        expression = ''
                        argStart = argStart.next
                        while argStart != argEnd:
                            expression = expression + argStart.str
                            argStart = argStart.next
                        reportError(tok, 'debug', 'Bailout, unknown argument type for argument \'' + expression + '\'.', 'debug')
                        continue
                    # Pointers to records are fine; only by-value records
                    # and containers are reported.
                    if valueType.pointer > 0:
                        continue
                    if valueType.type != 'record' and valueType.type != 'container':
                        continue
                    reportError(tok, 'style', 'Passing record to ellipsis function \'' + tok.astOperand1.function.name + '\'.', 'ellipsisStructArg')
                # '...' is necessarily the last parameter; stop scanning.
                break
# Driver: run every check on each cppcheck dump file named on the
# command line.  VERIFY/VERIFY_EXPECTED/VERIFY_ACTUAL are module globals
# set up earlier in this file (self-test mode).
for arg in sys.argv[1:]:
    if arg in ['-debug', '-verify', '--cli']:
        continue
    print("Checking %s..." % arg)
    data = cppcheckdata.CppcheckData(arg)

    if VERIFY:
        # Self-test mode: the analysed source marks expected findings as
        # '// <checkname>' comments; collect them as 'line:checkname'.
        VERIFY_ACTUAL = []
        VERIFY_EXPECTED = []
        for tok in data.rawTokens:
            if tok.str.startswith('//'):
                for word in tok.str[2:].split(' '):
                    if word in ['stringConcatInArrayInit', 'implicitlyVirtual', 'ellipsisStructArg']:
                        VERIFY_EXPECTED.append(str(tok.linenr) + ':' + word)

    stringConcatInArrayInit(data)
    implicitlyVirtual(data)
    ellipsisStructArg(data)

    if VERIFY:
        # Compare what reportError() recorded against the expectations;
        # any mismatch fails the self-test run.
        for expected in VERIFY_EXPECTED:
            if expected not in VERIFY_ACTUAL:
                print('Expected but not seen: ' + expected)
                sys.exit(1)
        for actual in VERIFY_ACTUAL:
            if actual not in VERIFY_EXPECTED:
                print('Not expected: ' + actual)
                sys.exit(1)
|
bartlomiejgrzeskowiak/cppcheck
|
addons/misc.py
|
Python
|
gpl-3.0
| 6,125
|
import bpy
from bpy.props import EnumProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.math import coordinate_modes
from sverchok.utils.field.scalar import SvVectorFieldDecomposed
class SvDecomposeVectorFieldNode(bpy.types.Node, SverchCustomTreeNode):
    """
    Triggers: Decompose Vector Field
    Tooltip: Decompose vector field into three scalar fields
    """
    bl_idname = 'SvExDecomposeVectorFieldNode'
    bl_label = 'Decompose Vector Field'
    bl_icon = 'OUTLINER_OB_EMPTY'
    sv_icon = 'SV_VFIELD_OUT'

    def update_sockets(self, context):
        # Rename the three output sockets to match the chosen coordinate
        # system; the socket order itself is fixed (created in sv_init).
        if self.output_mode == 'XYZ':
            self.outputs[0].name = 'X'
            self.outputs[1].name = 'Y'
            self.outputs[2].name = 'Z'
        elif self.output_mode == 'CYL':
            # Cylindrical coordinates: radius, azimuth, height.
            self.outputs[0].name = 'Rho'
            self.outputs[1].name = 'Phi'
            self.outputs[2].name = 'Z'
        else: # SPH
            # Spherical coordinates: radius, azimuth, inclination.
            self.outputs[0].name = 'Rho'
            self.outputs[1].name = 'Phi'
            self.outputs[2].name = 'Theta'
        updateNode(self, context)

    output_mode : EnumProperty(
        name = "Coordinates",
        items = coordinate_modes,
        default = 'XYZ',
        update = update_sockets)

    def sv_init(self, context):
        # One vector field in; three scalar component fields out.
        self.inputs.new('SvVectorFieldSocket', "Field")
        self.outputs.new('SvScalarFieldSocket', "Field1")
        self.outputs.new('SvScalarFieldSocket', "Field2")
        self.outputs.new('SvScalarFieldSocket', "Field3")
        self.update_sockets(context)

    def draw_buttons(self, context, layout):
        #layout.label(text="Output:")
        layout.prop(self, "output_mode", expand=True)

    def process(self):
        # Nothing to do unless at least one output is connected.
        if not any(socket.is_linked for socket in self.outputs):
            return

        fields_s = self.inputs['Field'].sv_get()

        fields_1_out = []
        fields_2_out = []
        fields_3_out = []
        for fields in fields_s:
            # Inputs may arrive as a single field or a list of fields.
            if not isinstance(fields, (list, tuple)):
                fields = [fields]
            for field in fields:
                # One decomposed scalar field per coordinate component.
                field1 = SvVectorFieldDecomposed(field, self.output_mode, 0)
                field2 = SvVectorFieldDecomposed(field, self.output_mode, 1)
                field3 = SvVectorFieldDecomposed(field, self.output_mode, 2)
                fields_1_out.append(field1)
                fields_2_out.append(field2)
                fields_3_out.append(field3)
        self.outputs[0].sv_set(fields_1_out)
        self.outputs[1].sv_set(fields_2_out)
        self.outputs[2].sv_set(fields_3_out)
def register():
    """Register this node class with Blender's type system."""
    cls = SvDecomposeVectorFieldNode
    bpy.utils.register_class(cls)
def unregister():
    """Remove this node class from Blender's type system."""
    cls = SvDecomposeVectorFieldNode
    bpy.utils.unregister_class(cls)
|
nortikin/sverchok
|
nodes/field/decompose_vector_field.py
|
Python
|
gpl-3.0
| 2,767
|
"""
Modules for plotting
"""
|
adybbroe/atrain_match
|
atrain_match/reshaped_files_scr/__init__.py
|
Python
|
gpl-3.0
| 30
|
# -*- coding: utf-8 -*-

"""
General description
-------------------

Example that shows how to add a `shared_limit` constraint to a model.

The following energy system is modeled with four time steps:

s1 --> b1 --> | --> d1
              | <-> storage1

s2a -->|--> b2 --> | --> d2
s2b -->|           | <-> storage2

- The storages, storage1 and storage2, have no losses at all.
- The demands, d1 and d2, are active at steps 3 and 4, respectively.
- The supplies, s1, s2a, and s2b are active at steps 1, 2, and 3, respectively.
  Usage of supply s2a is significantly cheaper than the usage of s2b.

In step 1, s1 has to be used to store in storage1 and fulfill d1 later in step 3.
Despite being the cheaper option, d2 cannot be fully covered by s2a because
storage1 and storage2 have a shared limit.
So, in step 3 -- when d1 was active -- the rest needed to fulfill d2
is stored in storage2 coming from the (now active but more expensive) s2b.

Installation requirements
-------------------------
This example requires the version v0.4.x of oemof. Install by:

    pip install 'oemof>=0.4,<0.5'
"""

__copyright__ = "oemof developer group"
__license__ = "MIT"

import pandas as pd
import oemof.solph as solph

# matplotlib is optional -- the example still solves without plotting.
try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None

pd.plotting.register_matplotlib_converters()

# create energy system with four hourly time steps
energysystem = solph.EnergySystem(
    timeindex=pd.date_range('1/1/2012', periods=4, freq='H'))

# create buses
b1 = solph.Bus(label="b1")
b2 = solph.Bus(label="b2")

# adding the buses to the energy system
energysystem.add(b1, b2)

# s1 is only available at step 2 (max mask), capacity 5
energysystem.add(solph.Source(label='s1',
                              outputs={b1: solph.Flow(
                                  max=[0, 1, 0, 0],
                                  nominal_value=5)}))

# s2a: cheap supply, only available at step 1
energysystem.add(solph.Source(label='s2a',
                              outputs={b2: solph.Flow(
                                  nominal_value=5,
                                  max=[1, 0, 0, 0])}))

# s2b: expensive alternative, only available at step 3
energysystem.add(solph.Source(label='s2b',
                              outputs={b2: solph.Flow(
                                  max=[0, 0, 1, 0],
                                  nominal_value=5,
                                  variable_costs=10)}))

# fixed demands d1 (step 3) and d2 (step 4)
energysystem.add(solph.Sink(label='d1',
                            inputs={b1: solph.Flow(
                                nominal_value=5,
                                fix=[0, 0, 1, 0])}))

energysystem.add(solph.Sink(label='d2',
                            inputs={b2: solph.Flow(
                                nominal_value=5,
                                fix=[0, 0, 0, 1])}))

# create the two loss-free storages attached to b1 and b2
storage1 = solph.components.GenericStorage(
    label="storage1",
    nominal_storage_capacity=5,
    inputs={b1: solph.Flow()},
    outputs={b1: solph.Flow()})

storage2 = solph.components.GenericStorage(
    label="storage2",
    nominal_storage_capacity=5,
    inputs={b2: solph.Flow()},
    outputs={b2: solph.Flow()})

energysystem.add(storage1, storage2)

# initialise the operational model
model = solph.Model(energysystem)

components = [storage1, storage2]

# add the shared limit constraint:
# 0.5*content(storage1) + 1.25*content(storage2) <= 7 at every step
solph.constraints.shared_limit(model,
                               model.GenericStorageBlock.storage_content,
                               "limit_storage", components,
                               [0.5, 1.25], upper_limit=7)

model.solve()

results = solph.processing.results(model)

# plot storage contents and the weighted sum against the shared limit
if plt is not None:
    plt.figure()
    plt.plot(results[(storage1, None)]['sequences'],
             label="storage1")
    plt.plot(results[(storage2, None)]['sequences'],
             label="storage2")
    plt.plot(results[('limit_storage', 'limit_storage')]['sequences'],
             label="weighted sum")
    plt.grid()
    plt.legend()
    plt.show()
|
oemof/examples
|
oemof_examples/oemof.solph/v0.4.x/shared_limit/shared_limit.py
|
Python
|
gpl-3.0
| 3,877
|
#!/usr/bin/env python
'''
Master loader for CANON April (Spring) 2021 Campaign
'''

import os
import sys
from datetime import datetime

# Make the parent loaders directory importable (CANON, timing).
parentDir = os.path.join(os.path.dirname(__file__), "../")
sys.path.insert(0, parentDir)

from CANON import CANONLoader
import timing

# Fix: the description previously said "October 2021"; this loader is the
# April (Spring) 2021 campaign, as the database name, title, CN21S file
# prefixes and the start/end dates below all show.
cl = CANONLoader('stoqs_canon_april2021', 'CANON-ECOHAB - April 2021',
                 description='April 2021 CANON campaign in Monterey Bay (CN21S)',
                 x3dTerrains={
                   'https://stoqs.mbari.org/x3d/Monterey25_10x/Monterey25_10x_scene.x3d': {
                     'name': 'Monterey25_10x',
                     'position': '-2822317.31255 -4438600.53640 3786150.85474',
                     'orientation': '0.89575 -0.31076 -0.31791 1.63772',
                     'centerOfRotation': '-2711557.9403829873 -4331414.329506527 3801353.4691465236',
                     'VerticalExaggeration': '10',
                   },
                 },
                 grdTerrain=os.path.join(parentDir, 'Monterey25.grd')
                 )

# Campaign time window
startdate = datetime(2021, 4, 8)
enddate = datetime(2021, 4, 29)

# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'


######################################################################
#  GLIDERS
######################################################################
# Glider data files from CeNCOOS thredds server
# L_662a updated parameter names in netCDF file
cl.l_662a_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/Line67/'
cl.l_662a_files = [ 'OS_Glider_L_662_20200615_TS.nc', ]
cl.l_662a_parms = ['temperature', 'salinity', 'fluorescence','oxygen']
cl.l_662a_startDatetime = startdate
cl.l_662a_endDatetime = enddate

# NPS_34 ##
cl.nps34_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps34_files = [ 'OS_Glider_NPS_G34_20210414_TS.nc' ]
cl.nps34_parms = ['TEMP', 'PSAL', 'FLU2', 'OXYG']
cl.nps34_startDatetime = startdate
cl.nps34_endDatetime = enddate

# NPS_29 ##
cl.nps29_base = 'http://legacy.cencoos.org/thredds/dodsC/gliders/MBARI/'
cl.nps29_files = [ 'OS_Glider_NPS_G29_20210209_TS.nc' ]
cl.nps29_parms = ['TEMP', 'PSAL', 'FLU2', 'OXYG']
cl.nps29_startDatetime = startdate
cl.nps29_endDatetime = enddate

######################################################################
# Wavegliders
######################################################################
# WG Tex - All instruments combined into one file - one time coordinate
##cl.wg_tex_base = cl.dodsBase + 'CANON_september2013/Platforms/Gliders/WG_Tex/final/'
##cl.wg_tex_files = [ 'WG_Tex_all_final.nc' ]
##cl.wg_tex_parms = [ 'wind_dir', 'wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal', 'density', 'bb_470', 'bb_650', 'chl' ]
##cl.wg_tex_startDatetime = startdate
##cl.wg_tex_endDatetime = enddate

# WG Hansen - All instruments combined into one file - one time coordinate
cl.wg_Hansen_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Hansen_files = [
                      'wgHansen/20210409/realTime/20210409.nc'
                     ]

cl.wg_Hansen_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp_float', 'sal_float',  'water_temp_sub',
                     'sal_sub', 'bb_470', 'bb_650', 'chl', 'beta_470', 'beta_650', 'pH', 'O2_conc_float','O2_conc_sub' ] # two ctds (_float, _sub), no CO2
cl.wg_Hansen_depths = [ 0 ]
cl.wg_Hansen_startDatetime = startdate
cl.wg_Hansen_endDatetime = enddate

# WG Tiny - All instruments combined into one file - one time coordinate
cl.wg_Tiny_base = 'http://dods.mbari.org/opendap/data/waveglider/deployment_data/'
cl.wg_Tiny_files = [
                      'wgTiny/20210408/realTime/20210408.nc'
                   ]

cl.wg_Tiny_parms = [ 'wind_dir', 'avg_wind_spd', 'max_wind_spd', 'atm_press', 'air_temp', 'water_temp', 'sal',  'bb_470', 'bb_650', 'chl',
                    'beta_470', 'beta_650', 'pCO2_water', 'pCO2_air', 'pH', 'O2_conc' ]
cl.wg_Tiny_depths = [ 0 ]
cl.wg_Tiny_startDatetime = startdate
cl.wg_Tiny_endDatetime = enddate

######################################################################
#  MOORINGS
######################################################################
cl.m1_base = 'http://dods.mbari.org/opendap/data/ssdsdata/deployments/m1/'
cl.m1_files = [
  '202008/OS_M1_20200825hourly_CMSTV.nc',
  '202008/m1_hs2_0m_20200825.nc' ]
cl.m1_parms = [
  'eastward_sea_water_velocity_HR', 'northward_sea_water_velocity_HR',
  'SEA_WATER_SALINITY_HR', 'SEA_WATER_TEMPERATURE_HR', 'SW_FLUX_HR', 'AIR_TEMPERATURE_HR',
  'EASTWARD_WIND_HR', 'NORTHWARD_WIND_HR', 'WIND_SPEED_HR',
  'bb470', 'bb676', 'fl676'
]
cl.m1_startDatetime = startdate
cl.m1_endDatetime = enddate

# Mooring 0A1
cl.oa1_base = 'http://dods.mbari.org/opendap/data/oa_moorings/deployment_data/OA1/202010/'
cl.oa1_files = [
               'realTime/OA1_202010.nc'
               ]
cl.oa1_parms = [
               'wind_dir', 'avg_wind_spd', 'atm_press', 'air_temp', 'water_temp',
               'sal', 'O2_conc', 'chl', 'pCO2_water', 'pCO2_air', 'pH',
               ]
cl.oa1_startDatetime = startdate
cl.oa1_endDatetime = enddate

# Mooring 0A2
cl.oa2_base = 'http://dods.mbari.org/opendap/data/oa_moorings/deployment_data/OA2/201912/'
cl.oa2_files = [
               'realTime/OA2_201912.nc'
               ]
cl.oa2_parms = [
               'wind_dir', 'avg_wind_spd', 'atm_press', 'air_temp', 'water_temp',
               'sal', 'O2_conc', 'chl', 'pCO2_water', 'pCO2_air', 'pH',
               ]
cl.oa2_startDatetime = startdate
cl.oa2_endDatetime = enddate

######################################################################
#  WESTERN FLYER
######################################################################
# UCTD
cl.wfuctd_base = cl.dodsBase + 'Other/routine/Platforms/Ships/WesternFlyer/uctd/'
cl.wfuctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'wetstar' ]
cl.wfuctd_files = [
                  'CN21Sm01.nc',
                  'CN21Sm02.nc',
                  'CN21Sm03.nc',
                  'CN21Sm04.nc',
                  'CN21Sm05.nc',
                  'CN21Sm06.nc',
                  ]

# PCTD
cl.wfpctd_base = cl.dodsBase + 'Other/routine/Platforms/Ships/WesternFlyer/pctd/'
cl.wfpctd_parms = [ 'TEMP', 'PSAL', 'xmiss', 'ecofl', 'oxygen' ]
cl.wfpctd_files = [
                  'CN21SC01.nc',
                  'CN21SC02.nc',
                  'CN21SC03.nc',
                  'CN21SC04.nc',
                  'CN21SC05.nc',
                  'CN21SC06.nc',
                  'CN21SC07.nc',
                  'CN21SC08.nc',
                  'CN21SC09.nc',
                  'CN21SC10.nc',
                  'CN21SC11.nc',
                  'CN21SC12.nc',
                  'CN21SC13.nc',
                  'CN21SC14.nc',
                  'CN21SC15.nc',
                  'CN21SC16.nc',
                  'CN21SC17.nc',
                  'CN21SC18.nc',
                  'CN21SC19.nc',
                  'CN21SC20.nc',
                  'CN21SC21.nc',
                  'CN21SC22.nc',
                  'CN21SC23.nc',
                  'CN21SC24.nc',
                  'CN21SC25.nc',
                  'CN21SC26.nc',
                  'CN21SC27.nc',
                  'CN21SC28.nc',
                  'CN21SC29.nc',
                  ]

# Execute the load
cl.process_command_line()

if cl.args.test:
    cl.stride = 10
elif cl.args.stride:
    cl.stride = cl.args.stride

# Test correction of Sample name for leaked ESP Cartridges and spare used
##cl.makai_base = ' http://dods.mbari.org/opendap/data/lrauv/makai/missionlogs/2021/20210420_20210426/20210421T033242'
##cl.makai_files = ['202104210332_202104211941_2S_scieng.nc']
##cl.makai_parms = ['temperature']
##cl.loadLRAUV('makai', startdate, enddate, build_attrs=False)
##sys.exit()

# LRAUVs were deployed a few days after the ship window opened.
lrauv_start = datetime(2021, 4, 11)
lrauv_end = datetime(2021, 4, 29)
cl.loadLRAUV('brizo', lrauv_start, lrauv_end)
cl.loadLRAUV('pontus', lrauv_start, lrauv_end)
cl.loadLRAUV('makai', lrauv_start, lrauv_end)
cl.loadLRAUV('daphne', lrauv_start, lrauv_end)
cl.loadM1()
cl.load_oa1()
cl.load_oa2()
cl.load_NPS29()
cl.load_NPS34()
cl.load_wg_Tiny()
cl.load_wg_Hansen()

# Realtime LRAUV loads - to be executed during the Campaign
##lrauv_parms = ['chlorophyll', 'temperature']
##cl.loadLRAUV('brizo', lrauv_start, lrauv_end, critSimpleDepthTime=0.1, sbd_logs=True,
##             parameters=lrauv_parms)
##cl.loadLRAUV('pontus', lrauv_start, lrauv_end, critSimpleDepthTime=0.1, sbd_logs=True,
##             parameters=lrauv_parms)
##cl.loadLRAUV('makai', lrauv_start, lrauv_end, critSimpleDepthTime=0.1, sbd_logs=True,
##             parameters=lrauv_parms)
##cl.loadLRAUV('daphne', lrauv_start, lrauv_end, critSimpleDepthTime=0.1, sbd_logs=True,
##             parameters=lrauv_parms)

cl.loadDorado(startdate, enddate, build_attrs=True)
cl.loadWFuctd()
cl.loadWFpctd()

##cl.loadSubSamples()

# Add any X3D Terrain information specified in the constructor to the database - must be done after a load is executed
cl.addTerrainResources()

print("All Done.")
|
stoqs/stoqs
|
stoqs/loaders/CANON/loadCANON_april2021.py
|
Python
|
gpl-3.0
| 8,768
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
""" Run radio characterizations on nodes """
import os
import sys
import time
import serial_aggregator
from serial_aggregator import NodeAggregator
FIRMWARE_PATH = "node_radio_characterization.elf"
class RadioCharac(object):
    """Radio characterization experiment driver (Python 2 script).

    Drives a set of IoT-LAB nodes through the serial aggregator: each
    node in turn broadcasts radio packets while every other node reports
    what it received.  Results accumulate in ``self.state``.
    """
    def __init__(self, nodes_list):
        # state layout (filled by run_characterization/_answers_handler):
        #   'options': experiment parameters
        #   'config' / 'send': {'success': [...], 'failure': [...]}
        #   'radio': {sender_id: {receiver_id: {'success': [...], 'errors': [...]}}}
        self.state = {}
        self.nodes = NodeAggregator(nodes_list)
        # Node currently transmitting; used to attribute radio_rx_error
        # lines, which do not carry the sender id themselves.
        self.current_sender_node = None

    def start(self):
        """ Start nodes connection """
        self.nodes.start()

    def stop(self):
        """ Stop nodes connection """
        self.nodes.stop()

    def _answers_handler(self, node_id, msg):
        """Parse one serial line from *node_id* and record it in state.

        Malformed/truncated lines raise IndexError and are logged as
        unknown messages.
        """
        try:
            line = msg.split(' ')
            if line[0] == 'config_radio':
                if line[1] == 'ACK':
                    self.state['config']['success'].append(node_id)
                else:
                    self.state['config']['failure'].append(node_id)
            elif line[0] == 'send_packets':
                if line[1] == 'ACK':
                    # send_packets ACK 0
                    self.state['send']['success'].append(node_id)
                else:
                    # send_packets NACK 42
                    self.state['send']['failure'].append('%s %s' %
                                                         (node_id, line[2]))
            elif line[0] == 'radio_rx':
                # "radio_rx m3-128 -17dBm 5 -61 255  sender power num rssi lqi"
                sender = line[1]
                # add list for this node (first packet from this sender)
                results = self.state['radio'][sender].get(
                    node_id, {'success': [], 'errors': []})
                self.state['radio'][sender][node_id] = results
                # add rx informations as a (power, num, rssi, lqi) tuple
                #results.append("%s" % ' '.join(line[2:6]))
                results['success'].append(tuple(line[2:6]))
            elif line[0] == 'radio_rx_error':
                # error lines carry no sender id; attribute them to the
                # node that is transmitting right now
                sender = self.current_sender_node.node_id
                # add list for this node
                results = self.state['radio'][sender].get(
                    node_id, {'success': [], 'errors': []})
                self.state['radio'][sender][node_id] = results
                results['errors'].append("%s" % line[1])
                # print >> sys.stderr, "Radio_rx_error %s %s sender %s" % (
                #     node_id, line[1], self.current_sender_node.node_id)
            else:
                print >> sys.stderr, "UNKOWN_MSG: %s %r" % (node_id, msg)
        except IndexError:
            print >> sys.stderr, "UNKOWN_MSG: %s %r" % (node_id, msg)

    def run_characterization(self, channel, power, num_pkts, delay):
        """ Run the radio characterizations on nodes"""
        self.start()
        self.state['options'] = {'power': power, 'channel': channel,
                                 'num_pkts': num_pkts, 'delay': delay}
        self.state['config'] = {'success': [], 'failure': []}
        self.state['send'] = {'success': [], 'failure': []}

        #nodes = self.nodes.values()
        #self.nodes_cli('--update', firmware=FIRMWARE_PATH)
        #time.sleep(2)  # wait started

        # init all nodes handlers and radio config
        self.state['radio'] = {}
        for node in self.nodes.values():
            self.state['radio'][node.node_id] = {}
            node.line_handler.append(self._answers_handler)

        cmd = "config_radio -c {channel}\n"
        self.nodes.broadcast(cmd.format(channel=channel))
        # NOTE(review): fixed 10 s wait for all config_radio ACKs --
        # presumably enough for the deployment sizes used; confirm.
        time.sleep(10)  # wait

        # Each node transmits in turn; 2 s pause between senders.
        cmd = "send_packets -i {node_id} -p {power} -n {num_pkts} -d {delay}\n"
        for node in self.nodes.values():
            self.current_sender_node = node
            print >> sys.stderr, "sending node %s" % node.node_id
            node.send(cmd.format(node_id=node.node_id, power=power,
                                 num_pkts=num_pkts, delay=delay))
            time.sleep(2)
        self.current_sender_node = None

        self.stop()
        return self.state
def simple_results_summary(result, human_readable=False):
    """Condense raw per-receiver radio results in place.

    Each receiver entry {'success': [...], 'errors': [...]} is replaced
    by a summary dict with 'average_rssi', 'pkt_reception' (percentage
    of num_pkts received) and 'errors' (count), each key present only
    when there is data for it.  With *human_readable* the summary is
    stringified for command-line display.  Returns the mutated result.
    """
    num_pkt = result['options']['num_pkts']
    for sender_node in result['radio'].values():
        for rx_node in sender_node:
            received = sender_node[rx_node]['success']
            errors = sender_node[rx_node]['errors']
            summary = {}
            if received:
                # RSSI is field 2 of each (power, num, rssi, lqi) tuple.
                rssi_total = sum(int(entry[2]) for entry in received)
                summary['average_rssi'] = rssi_total / float(len(received))
                rx_pkt = 100 * len(received) / float(num_pkt)
                summary['pkt_reception'] = "%.1f %%" % rx_pkt
            if errors:
                summary['errors'] = len(errors)
            sender_node[rx_node] = ("%s" % summary) if human_readable else summary
    return result
def main(argv):
    """ Run a characterization script on all nodes from an experiment """
    # The experiment description (JSON) is read from stdin; keep only the
    # nodes attached to this site (os.uname()[1] is the local hostname).
    json_dict = serial_aggregator.extract_json(sys.stdin.read())
    nodes_list = serial_aggregator.extract_nodes(json_dict, os.uname()[1])

    rad_charac = RadioCharac(nodes_list)
    # NOTE(review): num_pkt is never used -- run_characterization gets the
    # literal 32 below.  Presumably it should be passed as num_pkts; confirm.
    num_pkt = 32
    result = rad_charac.run_characterization(channel=16, power="-17dBm",
                                             num_pkts=32, delay=10)

    # '--summary' condenses raw results; '-h' makes them print-friendly.
    if '--summary' in argv:
        result = simple_results_summary(result, human_readable=('-h' in argv))

    import json
    result_str = json.dumps(result, sort_keys=True, indent=4)
    # Python 2 print statement: this script targets Python 2.
    print result_str


if __name__ == "__main__":
    main(sys.argv)
|
kYc0o/openlab-contiki
|
appli/iotlab/node_radio_characterization/run_characterization/run_characterization.py
|
Python
|
gpl-3.0
| 5,885
|
# -*- coding: UTF-8 -*-
"""
Lastship Add-on (C) 2017
Credits to Exodus and Covenant; our thanks go to their creators
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# Addon Name: lastship
# Addon id: plugin.video.lastship
# Addon Provider: LastShip
import os.path

# Build __all__ from the sibling .py files so that a star-import of this
# package picks up every source module (dunder files excluded).
files = os.listdir(os.path.dirname(__file__))
__all__ = [
    filename[:-3]
    for filename in files
    if filename.endswith('.py') and not filename.startswith('__')
]
|
lastship/plugin.video.lastship
|
resources/lib/sources/de/__init__.py
|
Python
|
gpl-3.0
| 1,043
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
from odoo import fields
from odoo.tests.common import TransactionCase
class TestCalendar(TransactionCase):
    """Tests for calendar.event: recurrence expansion and detaching,
    search ordering, and meeting <-> mail.activity synchronization."""

    def setUp(self):
        super(TestCalendar, self).setUp()

        self.CalendarEvent = self.env['calendar.event']
        # In Order to test calendar, I will first create One Simple Event with real data
        self.event_tech_presentation = self.CalendarEvent.create({
            'privacy': 'private',
            'start': '2011-04-30 16:00:00',
            'stop': '2011-04-30 18:30:00',
            'description': 'The Technical Presentation will cover following topics:\n* Creating Odoo class\n* Views\n* Wizards\n* Workflows',
            'duration': 2.5,
            'location': 'Odoo S.A.',
            'name': 'Technical Presentation'
        })

    def test_calender_event(self):
        """Recurrence: virtual occurrences, detaching one occurrence,
        all-day events, alarms, and rrule string parsing."""
        # Now I will set recurrence for this event to occur monday and friday of week
        data = {
            'fr': 1,
            'mo': 1,
            'interval': 1,
            'rrule_type': 'weekly',
            'end_type': 'end_date',
            'final_date': '2011-05-31 00:00:00',
            'recurrency': True
        }

        self.event_tech_presentation.write(data)

        # In order to check that recurrent events are shown successfully in calendar view, I will open calendar view of events
        self.CalendarEvent.fields_view_get(False, 'calendar')

        # In order to check that recurrent events are shown successfully in calendar view, I will search for one of the recurrent event and count the number of events
        rec_events = self.CalendarEvent.with_context({'virtual_id': True}).search([
            ('start', '>=', '2011-04-30 16:00:00'), ('start', '<=', '2011-05-31 00:00:00')
        ])
        self.assertEqual(len(rec_events), 9, 'Wrong number of events found')

        # Now I move a virtual event, to see that a real event is well created and depending from the native recurrence
        before = self.CalendarEvent.with_context({'virtual_id': False}).search([
            ('start', '>=', '2011-04-30 16:00:00'), ('start', '<=', '2011-05-31 00:00:00')
        ])

        # We start by detach the event
        newevent = rec_events[1].detach_recurring_event()
        newevent.with_context({'virtual_id': True}).write({'name': 'New Name', 'recurrency': True})
        after = self.CalendarEvent.with_context({'virtual_id': False}).search([
            ('start', '>=', '2011-04-30 16:00:00'), ('start', '<=', '2011-05-31 00:00:00')
        ])
        self.assertEqual(len(after), len(before) + 1, 'Wrong number of events found, after to have moved a virtual event')
        new_event = after - before
        self.assertEqual(new_event[0].recurrent_id, before.id, 'Recurrent_id not correctly passed to the new event')

        # Now I will test All day event
        allday_event = self.CalendarEvent.create({
            'allday': 1,
            'privacy': 'confidential',
            'start': '2011-04-30 00:00:00',
            'stop': '2011-04-30 00:00:00',
            'description': 'All day technical test',
            'location': 'School',
            'name': 'All day test event'
        })

        # In order to check reminder I will first create reminder
        res_alarm_day_before_event_starts = self.env['calendar.alarm'].create({
            'name': '1 Day before event starts',
            'duration': 1,
            'interval': 'days',
            'type': 'notification'
        })

        # Now I will assign this reminder to all day event
        allday_event.write({'alarm_ids': [(6, 0, [res_alarm_day_before_event_starts.id])]})

        # I create a recuring rule for my event
        calendar_event_sprint_review = self.CalendarEvent.create({
            'name': 'Begin of month meeting',
            'start': fields.Date.today() + ' 12:00:00',
            'stop': fields.Date.today() + ' 18:00:00',
            'recurrency': True,
            'rrule': 'FREQ=MONTHLY;INTERVAL=1;COUNT=12;BYDAY=1MO'
        })

        # I check that the attributes are set correctly
        # NOTE(review): the assertion messages below are copy-pasted
        # ('rrule_type should be mothly') and do not describe each field.
        self.assertEqual(calendar_event_sprint_review.rrule_type, 'monthly', 'rrule_type should be mothly')
        self.assertEqual(calendar_event_sprint_review.count, 12, 'rrule_type should be mothly')
        self.assertEqual(calendar_event_sprint_review.month_by, 'day', 'rrule_type should be mothly')
        self.assertEqual(calendar_event_sprint_review.byday, '1', 'rrule_type should be mothly')
        self.assertEqual(calendar_event_sprint_review.week_list, 'MO', 'rrule_type should be mothly')

    def test_event_order(self):
        """ check the ordering of events when searching """
        def create_event(name, date):
            return self.CalendarEvent.create({
                'name': name,
                'start': date + ' 12:00:00',
                'stop': date + ' 14:00:00',
                'duration': 2.0,
            })
        foo1 = create_event('foo', '2011-04-01')
        foo2 = create_event('foo', '2011-06-01')
        bar1 = create_event('bar', '2011-05-01')
        bar2 = create_event('bar', '2011-06-01')
        domain = [('id', 'in', (foo1 + foo2 + bar1 + bar2).ids)]

        # sort them by name only
        events = self.CalendarEvent.search(domain, order='name')
        self.assertEqual(events.mapped('name'), ['bar', 'bar', 'foo', 'foo'])
        events = self.CalendarEvent.search(domain, order='name desc')
        self.assertEqual(events.mapped('name'), ['foo', 'foo', 'bar', 'bar'])

        # sort them by start date only
        events = self.CalendarEvent.search(domain, order='start')
        self.assertEqual(events.mapped('start'), (foo1 + bar1 + foo2 + bar2).mapped('start'))
        events = self.CalendarEvent.search(domain, order='start desc')
        self.assertEqual(events.mapped('start'), (foo2 + bar2 + bar1 + foo1).mapped('start'))

        # sort them by name then start date
        events = self.CalendarEvent.search(domain, order='name asc, start asc')
        self.assertEqual(list(events), [bar1, bar2, foo1, foo2])
        events = self.CalendarEvent.search(domain, order='name asc, start desc')
        self.assertEqual(list(events), [bar2, bar1, foo2, foo1])
        events = self.CalendarEvent.search(domain, order='name desc, start asc')
        self.assertEqual(list(events), [foo1, foo2, bar1, bar2])
        events = self.CalendarEvent.search(domain, order='name desc, start desc')
        self.assertEqual(list(events), [foo2, foo1, bar2, bar1])

        # sort them by start date then name
        events = self.CalendarEvent.search(domain, order='start asc, name asc')
        self.assertEqual(list(events), [foo1, bar1, bar2, foo2])
        events = self.CalendarEvent.search(domain, order='start asc, name desc')
        self.assertEqual(list(events), [foo1, bar1, foo2, bar2])
        events = self.CalendarEvent.search(domain, order='start desc, name asc')
        self.assertEqual(list(events), [bar2, foo2, bar1, foo1])
        events = self.CalendarEvent.search(domain, order='start desc, name desc')
        self.assertEqual(list(events), [foo2, bar2, bar1, foo1])

    def test_event_activity(self):
        """Meetings linked to a record create/update/delete a matching
        mail.activity on that record."""
        # ensure meeting activity type exists
        meeting_act_type = self.env['mail.activity.type'].search([('category', '=', 'meeting')], limit=1)
        if not meeting_act_type:
            meeting_act_type = self.env['mail.activity.type'].create({
                'name': 'Meeting Test',
                'category': 'meeting',
            })

        # have a test model inheriting from activities
        test_record = self.env['res.partner'].create({
            'name': 'Test',
        })
        now = datetime.now()
        test_user = self.env.ref('base.user_demo')
        test_name, test_description, test_description2 = 'Test-Meeting', '<p>Test-Description</p>', '<p>NotTest</p>'

        # create using default_* keys
        test_event = self.env['calendar.event'].sudo(test_user).with_context(
            default_res_model=test_record._name,
            default_res_id=test_record.id,
        ).create({
            'name': test_name,
            'description': test_description,
            'start': fields.Datetime.to_string(now + timedelta(days=-1)),
            'stop': fields.Datetime.to_string(now + timedelta(hours=2)),
            'user_id': self.env.user.id,
        })
        self.assertEqual(test_event.res_model, test_record._name)
        self.assertEqual(test_event.res_id, test_record.id)
        self.assertEqual(len(test_record.activity_ids), 1)
        self.assertEqual(test_record.activity_ids.summary, test_name)
        self.assertEqual(test_record.activity_ids.note, test_description)
        self.assertEqual(test_record.activity_ids.user_id, self.env.user)
        self.assertEqual(test_record.activity_ids.date_deadline, fields.Date.to_string((now + timedelta(days=-1)).date()))

        # updating event should update activity
        test_event.write({
            'name': '%s2' % test_name,
            'description': test_description2,
            'start': fields.Datetime.to_string(now + timedelta(days=-2)),
            'user_id': test_user.id,
        })
        self.assertEqual(test_record.activity_ids.summary, '%s2' % test_name)
        self.assertEqual(test_record.activity_ids.note, test_description2)
        self.assertEqual(test_record.activity_ids.user_id, test_user)
        self.assertEqual(test_record.activity_ids.date_deadline, fields.Date.to_string((now + timedelta(days=-2)).date()))

        # deleting meeting should delete its activity
        test_record.activity_ids.unlink()
        self.assertEqual(self.env['calendar.event'], self.env['calendar.event'].search([('name', '=', test_name)]))

        # create using active_model keys
        test_event = self.env['calendar.event'].sudo(self.env.ref('base.user_demo')).with_context(
            active_model=test_record._name,
            active_id=test_record.id,
        ).create({
            'name': test_name,
            'description': test_description,
            'start': fields.Datetime.to_string(now + timedelta(days=-1)),
            'stop': fields.Datetime.to_string(now + timedelta(hours=2)),
            'user_id': self.env.user.id,
        })
        self.assertEqual(test_event.res_model, test_record._name)
        self.assertEqual(test_event.res_id, test_record.id)
        self.assertEqual(len(test_record.activity_ids), 1)
|
richard-willowit/odoo
|
addons/calendar/tests/test_calendar.py
|
Python
|
gpl-3.0
| 10,554
|
# Copyright 2016 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
import time
from hscommon.util import format_time_decimal
def format_timestamp(t, delta):
    """Render timestamp *t*: as a decimal time delta when *delta* is
    truthy, otherwise as a local 'Y/m/d H:M:S' date ('---' for t <= 0)."""
    if delta:
        return format_time_decimal(t)
    if t <= 0:
        return "---"
    return time.strftime("%Y/%m/%d %H:%M:%S", time.localtime(t))
def format_words(w):
    """Join words with ', '; nested lists become parenthesized groups and
    newlines inside words are flattened to spaces."""
    def render(item):
        if isinstance(item, list):
            return "(%s)" % ", ".join(render(sub) for sub in item)
        return item.replace("\n", " ")

    return ", ".join(render(item) for item in w)
def format_perc(p):
    """Format a percentage value as a whole number (no '%' sign)."""
    return "{:0.0f}".format(p)
def format_dupe_count(c):
    """Render a duplicate count; zero (or None) shows as a placeholder."""
    if c:
        return str(c)
    return "---"
def cmp_value(dupe, attrname):
    """Fetch *attrname* from *dupe* for sorting/comparison purposes.

    Missing attributes compare as the empty string; string values are
    lowercased so the comparison is case-insensitive.
    """
    value = getattr(dupe, attrname, "")
    if isinstance(value, str):
        return value.lower()
    return value
def fix_surrogate_encoding(s, encoding="utf-8"):
    """Return *s* made safely encodable to *encoding*.

    Ref #210: paths decoded with the 'surrogateescape' option can contain
    lone surrogates, making an otherwise-correct unicode string unencodable
    to utf-8. If *s* encodes cleanly it is returned untouched; otherwise the
    problematic characters are replaced. This dance is *lossy* — we can't do
    better because we don't know the encoding of the filesystem the string
    came from. Don't feed the result back into fs-related functions (the
    path changes); use it only to export the path outside the unicode realm.
    See http://lucumr.pocoo.org/2013/7/2/the-updated-guide-to-unicode/
    """
    try:
        s.encode(encoding)
    except UnicodeEncodeError:
        # 'replace' substitutes each unencodable character (e.g. with '?').
        return s.encode(encoding, "replace").decode(encoding)
    return s
|
mahmutf/dupeguru
|
core/util.py
|
Python
|
gpl-3.0
| 2,111
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import pytest
import pytest_bdd as bdd
# Collect every scenario from yankpaste.feature as a pytest test.
bdd.scenarios('yankpaste.feature')
@pytest.fixture(autouse=True)
def init_fake_clipboard(quteproc):
    """Make sure the fake clipboard will be used."""
    # autouse: runs before every scenario in this module so tests never
    # touch the real system clipboard.
    quteproc.send_cmd(':debug-set-fake-clipboard')
@bdd.when(bdd.parsers.parse('I insert "{value}" into the text field'))
def set_text_field(quteproc, value):
    """BDD step: fill the page's text field with *value* via JS.

    Runs set_text() with :jseval in the page's main JS world (--world=0)
    — presumably set_text is defined by the loaded test page; verify
    against the feature's fixture pages. The wait synchronizes on the
    page's JS log line.
    """
    quteproc.send_cmd(":jseval --world=0 set_text('{}')".format(value))
    quteproc.wait_for_js('textarea set to: ' + value)
|
t-wissmann/qutebrowser
|
tests/end2end/features/test_yankpaste_bdd.py
|
Python
|
gpl-3.0
| 1,283
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
# GnomeKeyring is optional: when the GI bindings are missing we fall back
# to the volatile in-memory keyring defined below.
try:
    from gi.repository import GnomeKeyring
except ImportError:
    GnomeKeyring = None
from GTG.tools.borg import Borg
from GTG.tools.logger import Log
class GNOMEKeyring(Borg):
    """Password storage backed by the default GNOME keyring.

    Borg pattern: all instances share state, so the keyring handle is
    looked up only once.
    """

    def __init__(self):
        super().__init__()
        # Borg: state is shared between instances; initialize only once.
        if not hasattr(self, "keyring"):
            result, self.keyring = GnomeKeyring.get_default_keyring_sync()
            if result != GnomeKeyring.Result.OK:
                raise Exception("Can't get default keyring, error=%s" % result)

    def set_password(self, name, password, userid=""):
        """Store *password* under *name* and return the keyring item id.

        *userid* is accepted for interface parity with FallbackKeyring but
        is not used here.
        """
        attrs = GnomeKeyring.Attribute.list_new()
        GnomeKeyring.Attribute.list_append_string(attrs, "backend", name)
        # Final True = update_if_exists: replace a matching existing item
        # instead of creating a duplicate.
        result, password_id = GnomeKeyring.item_create_sync(
            self.keyring,
            GnomeKeyring.ItemType.GENERIC_SECRET,
            name,
            attrs,
            password,
            True)
        if result != GnomeKeyring.Result.OK:
            raise Exception("Can't create a new password, error=%s" % result)
        return password_id

    def get_password(self, item_id):
        """Return the secret stored under *item_id*, or "" on any failure."""
        result, item_info = GnomeKeyring.item_get_info_sync(
            self.keyring, item_id)
        if result == GnomeKeyring.Result.OK:
            return item_info.get_secret()
        else:
            return ""
class FallbackKeyring(Borg):
    """Volatile in-memory password store used when GNOME keyring is absent.

    Borg pattern: all instances share the same dict. Passwords are lost
    when the process exits.
    """

    def __init__(self):
        super().__init__()
        # Borg: shared state; initialize the store only once.
        if not hasattr(self, "keyring"):
            self.keyring = {}
            self.max_key = 1

    def set_password(self, name, password, userid=""):
        """Store *password* and return its integer key.

        This implementation does not need *name* and *userid*; they exist
        only to mirror the GNOMEKeyring interface.
        """
        # Find unused key
        while self.max_key in self.keyring:
            self.max_key += 1
        self.keyring[self.max_key] = password
        return self.max_key

    def get_password(self, key):
        """Return the password stored under *key*, or "" if unknown."""
        return self.keyring.get(key, "")
# Select the concrete backend at import time: prefer the GNOME keyring,
# fall back to the volatile in-memory store otherwise.
# NOTE(review): the backslash continuation below is *inside* the string
# literal, so the logged message contains no space/newline at the join
# point — confirm intended wording.
if GnomeKeyring is not None:
    Keyring = GNOMEKeyring
else:
    Log.info("GNOME keyring was not found, passwords will be not stored after\
restart of GTG")
    Keyring = FallbackKeyring
|
shtrom/gtg
|
GTG/tools/keyring.py
|
Python
|
gpl-3.0
| 3,089
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Squashed initial migration: creates the IPBan table (a blocklist of
    # client IP addresses, soft-deleted via the `deleted` timestamp).
    # NOTE(review): the taggit dependency looks like an ordering artifact
    # of the squash — confirm before removing.

    dependencies = [
        ('taggit', '0002_auto_20150616_2121'),
    ]

    operations = [
        migrations.CreateModel(
            name='IPBan',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ip', models.GenericIPAddressField()),
                ('created', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
                ('deleted', models.DateTimeField(null=True, blank=True)),
            ],
        ),
    ]
|
jwhitlock/kuma
|
kuma/core/migrations/0001_squashed_0004_remove_unused_tags.py
|
Python
|
mpl-2.0
| 728
|
import copy
import uuid

from django.test import TestCase
from rest_framework.serializers import ValidationError

from api.models.data_objects import DataObject
from api.serializers.data_objects import (
    DataObjectSerializer,
    DataObjectUpdateSerializer,
    FileResourceSerializer,
)

from . import fixtures, get_mock_context
class TestDataObjectSerializer(TestCase):
    """Create, render and round-trip tests for DataObjectSerializer."""

    def testCreate_file(self):
        # Creating from fixture data should also initialize the FileResource.
        data = fixtures.data_objects.file_data_object
        s = DataObjectSerializer(data=data)
        s.is_valid(raise_exception=True)
        data_object = s.save()
        self.assertEqual(data_object.file_resource.filename,
                         data['value']['filename'])

    def testCreate_countQueries(self):
        # Guards against query-count regressions on create.
        data = fixtures.data_objects.file_data_object
        s = DataObjectSerializer(data=data)
        s.is_valid(raise_exception=True)
        self.assertNumQueries(4, lambda: s.save())

    def testRender_file(self):
        # Rendered 'value' must echo back all file metadata fields.
        file_data = fixtures.data_objects.file_data_object['value']
        data_object = DataObject.create_and_initialize_file_resource(**file_data)
        s = DataObjectSerializer(data_object,
                                 context=get_mock_context())
        rendered_data = s.data
        value = rendered_data['value']
        self.assertEqual(value['filename'], file_data['filename'])
        self.assertEqual(value['md5'], file_data['md5'])
        self.assertEqual(value['source_type'], file_data['source_type'])
        self.assertEqual(value['import_comments'], file_data['import_comments'])
        self.assertEqual(value['imported_from_url'], file_data['imported_from_url'])

    def testRoundTrip_file(self):
        # Serialize, re-create from the rendered form (fresh UUID), and
        # check the second render matches the first except for identity.
        file_data = fixtures.data_objects.file_data_object['value']
        data_object = DataObject.create_and_initialize_file_resource(**file_data)
        s = DataObjectSerializer(data_object,
                                 context=get_mock_context())
        rendered_1 = s.data
        # update UUID to avoid collision
        input_2 = copy.deepcopy(rendered_1)
        input_2['uuid'] = str(uuid.uuid4())
        s = DataObjectSerializer(data=input_2)
        s.is_valid(raise_exception=True)
        data_object = s.save()
        s = DataObjectSerializer(data_object,
                                 context=get_mock_context())
        rendered_2 = s.data
        self.assertEqual(rendered_1['type'],
                         rendered_2['type'])
        self.assertEqual(rendered_1['datetime_created'],
                         rendered_2['datetime_created'])
        self.assertNotEqual(rendered_1['uuid'],
                            rendered_2['uuid'])
        self.assertNotEqual(rendered_1['url'],
                            rendered_2['url'])
        self.assertEqual(rendered_1['value']['filename'],
                         rendered_2['value']['filename'])
        self.assertEqual(rendered_1['value']['md5'],
                         rendered_2['value']['md5'])
        self.assertEqual(rendered_1['value']['import_comments'],
                         rendered_2['value']['import_comments'])
        self.assertEqual(rendered_1['value']['imported_from_url'],
                         rendered_2['value']['imported_from_url'])
        self.assertEqual(rendered_1['value']['upload_status'],
                         rendered_2['value']['upload_status'])
        self.assertEqual(rendered_1['value']['source_type'],
                         rendered_2['value']['source_type'])
        self.assertEqual(rendered_1['value']['file_url'],
                         rendered_2['value']['file_url'])

    def testCreate_AlreadyExists(self):
        # Re-posting an existing object's rendered form is a no-op create.
        file_data = copy.deepcopy(fixtures.data_objects.file_data_object)['value']
        data_object = DataObject.create_and_initialize_file_resource(**file_data)
        s = DataObjectSerializer(data_object,
                                 context=get_mock_context())
        rendered_1 = s.data
        data_object_count_before = DataObject.objects.count()
        s = DataObjectSerializer(data=rendered_1)
        s.is_valid(raise_exception=True)
        data_object = s.save()
        # Verify that no new object was created
        data_object_count_after = DataObject.objects.count()
        self.assertEqual(data_object_count_before, data_object_count_after)

    def testCreate_ErrorAlreadyExistsWithMismatch(self):
        # Same UUID but different md5 must be rejected.
        file_data = copy.deepcopy(fixtures.data_objects.file_data_object)['value']
        data_object = DataObject.create_and_initialize_file_resource(**file_data)
        s = DataObjectSerializer(data_object,
                                 context=get_mock_context())
        data_object_count_before = DataObject.objects.count()
        rendered_1 = s.data
        rendered_1['value']['md5'] = '192f08c86f675deca469ea50ffac38e0'
        s = DataObjectSerializer(data=rendered_1)
        with self.assertRaises(ValidationError):
            s.is_valid(raise_exception=True)
            # NOTE(review): unreachable — is_valid() above raises first.
            data_object = s.save()
        # Verify that no new object was created
        data_object_count_after = DataObject.objects.count()
        self.assertEqual(data_object_count_before, data_object_count_after)

    def testCreate_noDroolOnFail(self):
        # A failed save must not leave partial rows behind.
        file_data = copy.deepcopy(fixtures.data_objects.file_data_object)
        file_data['value']['md5'] = 'invalid_md5'
        data_object_count_before = DataObject.objects.count()
        s = DataObjectSerializer(data=file_data)
        s.is_valid(raise_exception=True)
        with self.assertRaises(ValidationError):
            m = s.save()
        data_object_count_after = DataObject.objects.count()
        self.assertEqual(data_object_count_before, data_object_count_after)

    # Non-file scalar fixtures used by the tests below.
    data_object_fixtures = [
        copy.deepcopy(fixtures.data_objects.string_data_object),
        copy.deepcopy(fixtures.data_objects.boolean_data_object),
        copy.deepcopy(fixtures.data_objects.float_data_object),
        copy.deepcopy(fixtures.data_objects.integer_data_object)
    ]

    def testCreate_nonFileTypes(self):
        for data in self.data_object_fixtures:
            s = DataObjectSerializer(data=data)
            s.is_valid(raise_exception=True)
            data_object = s.save()
            self.assertEqual(data_object.value, data['value'])
            rendered_data = DataObjectSerializer(
                data_object, context=get_mock_context()).data
            self.assertEqual(data_object.value, rendered_data['value'])

    def testRoundTrip_nonFileTypes(self):
        for data in self.data_object_fixtures:
            s1 = DataObjectSerializer(data=data)
            s1.is_valid(raise_exception=True)
            data_object_1 = s1.save()
            self.assertEqual(data_object_1.value, data['value'])
            rendered_data = DataObjectSerializer(
                data_object_1, context=get_mock_context()).data
            # Update UUID to avoid collision
            rendered_data['uuid'] = uuid.uuid4()
            s2 = DataObjectSerializer(data=rendered_data)
            s2.is_valid(raise_exception=True)
            data_object_2 = s2.save()
            self.assertEqual(data_object_1.value, data_object_2.value)
class TestDataObjectUpdateSerializer(TestCase):
    """Tests for partial updates through DataObjectUpdateSerializer.

    BUG FIX: this was declared with ``def`` instead of ``class``, so the
    test runner never collected or executed the methods inside. Requires
    DataObjectUpdateSerializer to be imported at module level.
    """

    def testUpdateUploadStatus(self):
        # upload_status is a mutable field: updating it must succeed.
        file_data = fixtures.data_objects.file_data_object['value']
        data_object = DataObject.create_and_initialize_file_resource(**file_data)
        s = DataObjectSerializer(data_object,
                                 context=get_mock_context())
        s.save()
        s2 = DataObjectUpdateSerializer(data_object)
        s2.update(
            data_object, {'value': {'upload_status': 'error'}})
        self.assertEqual(s2.data['value']['upload_status'], 'error')

    def testUpdateProtectedValue(self):
        # Immutable fields such as 'type' must be rejected on update.
        file_data = fixtures.data_objects.file_data_object['value']
        data_object = DataObject.create_and_initialize_file_resource(**file_data)
        s = DataObjectSerializer(data_object,
                                 context=get_mock_context())
        s.save()
        s2 = DataObjectUpdateSerializer(data_object)
        with self.assertRaises(ValidationError):
            s2.update(
                data_object, {'type': 'string'})
|
StanfordBioinformatics/loom
|
server/loomengine_server/api/test/serializers/test_data_objects.py
|
Python
|
agpl-3.0
| 8,202
|
#-*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{
'name': 'Time Accrual Policy',
'version': '1.0',
'category': 'Generic Modules/Human Resources',
'description': """
Define Time Accrual Policies
============================
Define properties of a leave accrual policy. The accrued time is calculated
based on the length of service of the employee. An additional premium may be
added on the base rate based on additional months of service. This policy is ideal
for annual leave accruals. If the type of accrual is 'Standard' time is accrued and
withdrawn manually. However, if the type is 'Calendar' the time is accrued (and recorded)
at a fixed frequency.
""",
'author': 'Michael Telahun Makonnen <mmakonnen@gmail.com>',
'website': 'http://miketelahun.wordpress.com',
'depends': [
'hr_accrual',
'hr_contract_state',
'hr_employee_seniority',
'hr_policy_group',
],
'init_xml': [
],
'update_xml': [
'security/ir.model.access.csv',
'hr_policy_accrual_cron.xml',
'hr_policy_accrual_view.xml',
],
'test': [
],
'demo_xml': [
],
'installable': False,
'active': False,
}
|
yvaucher/hr
|
__unported__/hr_policy_accrual/__openerp__.py
|
Python
|
agpl-3.0
| 1,961
|
# -*- coding: utf-8 -*-
# Copyright 2021 El Nogal - Pedro Gómez <pegomez@elnogal.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
    # Odoo 8.0 addon manifest for the delivery-route customization.
    "name": "Delivery Routes",
    "version": '8.0.1.0.1',
    "category": 'Sale',
    "description": """Delivery routes""",
    "author": 'Pedro Gómez',
    "website": 'www.elnogal.com',
    "depends": [
        'sale',
        'stock',
        'eln_sale',
    ],
    "init_xml": [],
    "data": [
        'wizard/wiz_default_delivery_route.xml',
        'security/ir.model.access.csv',
        'views/delivery_route_view.xml',
        'views/res_partner_view.xml',
        'views/stock_view.xml',
    ],
    "demo_xml": [],
    "installable": True
}
|
Comunitea/CMNT_00040_2016_ELN_addons
|
delivery_route/__openerp__.py
|
Python
|
agpl-3.0
| 708
|
"""Chirp bindings."""
import concurrent.futures as fut
import sys
import threading
from _chirp_cffi import ffi, lib
from . import common, const
class ChirpPool(object):
    """Thread/pool-based front end for the chirp C library.

    Chirp is message passing with fully automatic connection setup and
    cleanup. Just create a ChirpPool() object, call obj.start(), create a
    Message using port and address of a peer and then obj.send(msg).

    .. code-block:: python

       def setup_chirp():
           client = chirp.Chirp()
           client.start()
           msg = chirp.Message(
               "10.10.10.10",
               2998,
               "yo dawg",
           )
           load = client.send(msg).result()
           client.close()

    The connection will be removed if it wasn't used for REUSE_TIME or when
    you close chirp.

    The config argument can either be None, a object of a class like
    :class:`ccchirp.Config` or a dictionary containing the same keys as
    :class:`ccchirp.Config`. If config is None the default will be used.
    When config is set, the defined config variables will be taken and
    default copied from :class:`ccchirp.Config`.

    :param config: Config as either a object or a dictionary.
    :type config: :py:class:`object` or :py:class:`dict`
    """
    def __init__(
        self,
        config = None,
    ):
        self._config = common.complete_config(config, const.Config())
        # cffi-owned C structs; kept on self so they outlive the C calls.
        self._c_config = ffi.new("ch_config_t*")
        self._chirp = ffi.new("ch_chirp_t*")
        self._loop = ffi.new("uv_loop_t*")
        # References to the char[] buffers handed to C (prevents GC while
        # the C side still points at them).
        self._cert_str = None
        self._dh_str = None
        self._thread = None
        self._pool = None
        self._uv_ret = 0
        self._chirp_ret = 0

    # TODO properties
    def start(self):
        """Start servers and cleanup routines.

        Initializes the libuv loop and chirp, then runs the loop on a
        dedicated background thread and creates the handler thread pool.

        :raises RuntimeError: when the configured port is already in use.
        """
        self._fill_c_config()
        lib.ch_loop_init(self._loop)
        err = lib.ch_chirp_init(
            self._chirp,
            self._c_config,
            self._loop,
            lib.python_log_cb
        )
        if err == lib.CH_EADDRINUSE:
            # BUG FIX: the two adjacent literals used to concatenate to
            # "already inuse." — a space was missing at the join point.
            raise RuntimeError("Port {port_number:d} already in "
                               "use.".format(port_number=self._config.PORT))
        assert err == lib.CH_SUCCESS
        lib.ch_chirp_set_auto_stop(self._chirp)

        def run():
            """Run chirp in a thread."""
            # Return codes are stashed on self and asserted in close().
            self._uv_ret = lib.ch_run(self._loop)
            self._chirp_ret = lib.ch_loop_close(self._loop)

        self._pool = fut.ThreadPoolExecutor(
            max_workers=self._config.MAX_HANDLERS
        )
        self._thread = threading.Thread(target=run)
        self._thread.start()

    def close(self):
        """Close chirp, join the loop thread and shut the pool down."""
        lib.ch_chirp_close_ts(self._chirp)
        self._thread.join()
        assert self._uv_ret == lib.CH_SUCCESS
        assert self._chirp_ret == lib.CH_SUCCESS
        self._pool.shutdown()

    def _fill_c_config(self):
        """Fill in the c_config from the config."""
        c_conf = self._c_config
        conf = self._config
        lib.ch_chirp_config_init(c_conf)
        (
            conf.CERT_CHAIN_PEM,
            conf.DH_PARAMS_PEM
        ) = common.get_crypto_files()
        # Plain integer options copy straight across.
        for std_attr in [
            'REUSE_TIME',
            'TIMEOUT',
            'PORT',
            'BACKLOG',
            'RETRIES',
        ]:
            setattr(
                c_conf,
                std_attr,
                getattr(
                    conf,
                    std_attr,
                )
            )
        # Strings must be kept alive as cffi char[] buffers (see __init__).
        self._cert_str = ffi.new(
            "char[]", conf.CERT_CHAIN_PEM.encode("UTF-8")
        )
        self._dh_str = ffi.new(
            "char[]", conf.DH_PARAMS_PEM.encode("UTF-8")
        )
        c_conf.CERT_CHAIN_PEM = self._cert_str
        c_conf.DH_PARAMS_PEM = self._dh_str
# NOTE(review): (3, 4, 1) > (3, 4) is True, so this guard also admits
# Python 3.4 point releases; if async/await (3.5+) was intended, the
# check should be >= (3, 5) — confirm before changing.
if sys.version_info > (3, 4):
    class ChirpAsync(object): # pragma: no cover TODO
        """Asyncio-flavored chirp front end (stub — only __init__ so far).

        Chirp is message passing with fully automatic connection setup and
        cleanup. Just create a Chirp() object, await obj.start(), create a
        Message using port and address of a peer and then await
        obj.send(msg).

        .. code-block:: python

           async def setup_chirp():
               client = chirp.Chirp()
               await client.start()
               msg = chirp.Message(
                   "10.10.10.10",
                   2998,
                   "yo dawg",
               )
               load = await client.send(msg)
               await client.close()

        The connection will be removed if it wasn't used for REUSE_TIME or
        when you close chirp.

        The config argument can either be None, a object of a class like
        :class:`ccchirp.Config` or a dictionary containing the same keys as
        :class:`ccchirp.Config`. If config is None the default will be used.
        When config is set, the defined config variables will be taken and
        default copied from :class:`ccchirp.Config`.

        :param config: Config as either a object or a dictionary.
        :type config: :py:class:`object` or :py:class:`dict`
        """
        def __init__(
            self,
            config = None,
        ):
            self._config = common.complete_config(
                config, const.Config()
            )
|
ganwell/c4irp
|
chirp/chirp.py
|
Python
|
agpl-3.0
| 5,427
|
# -*- coding: utf-8 -*-
"""Tests for LTI Xmodule LTIv2.0 functional logic."""
import datetime
import textwrap
from django.utils.timezone import UTC
from mock import Mock
from xmodule.lti_module import LTIDescriptor
from xmodule.lti_2_util import LTIError
from . import LogicTest
class LTI20RESTResultServiceTest(LogicTest):
"""Logic tests for LTI module. LTI2.0 REST ResultService"""
descriptor_class = LTIDescriptor
def setUp(self):
super(LTI20RESTResultServiceTest, self).setUp()
self.environ = {'wsgi.url_scheme': 'http', 'REQUEST_METHOD': 'POST'}
self.system.get_real_user = Mock()
self.system.publish = Mock()
self.system.rebind_noauth_module_to_user = Mock()
self.user_id = self.xmodule.runtime.anonymous_student_id
self.lti_id = self.xmodule.lti_id
self.xmodule.due = None
self.xmodule.graceperiod = None
def test_sanitize_get_context(self):
"""Tests that the get_context function does basic sanitization"""
# get_context, unfortunately, requires a lot of mocking machinery
mocked_course = Mock(name='mocked_course', lti_passports=['lti_id:test_client:test_secret'])
modulestore = Mock(name='modulestore')
modulestore.get_course.return_value = mocked_course
runtime = Mock(name='runtime', modulestore=modulestore)
self.xmodule.descriptor.runtime = runtime
self.xmodule.lti_id = "lti_id"
test_cases = ( # (before sanitize, after sanitize)
(u"plaintext", u"plaintext"),
(u"a <script>alert(3)</script>", u"a <script>alert(3)</script>"), # encodes scripts
(u"<b>bold 包</b>", u"<b>bold 包</b>"), # unicode, and <b> tags pass through
(u'<img src="image.jpg" alt="alt" title="title" height="50" width="50">', # attributes are not identical
u'<img src="image.jpg" alt="alt" height="50" width="50" title="title">') # b/c sanitizer changes order
)
for case in test_cases:
self.xmodule.score_comment = case[0]
self.assertEqual(
case[1],
self.xmodule.get_context()['comment']
)
def test_lti20_rest_bad_contenttype(self):
"""
Input with bad content type
"""
with self.assertRaisesRegexp(LTIError, "Content-Type must be"):
request = Mock(headers={u'Content-Type': u'Non-existent'})
self.xmodule.verify_lti_2_0_result_rest_headers(request)
def test_lti20_rest_failed_oauth_body_verify(self):
"""
Input with bad oauth body hash verification
"""
err_msg = "OAuth body verification failed"
self.xmodule.verify_oauth_body_sign = Mock(side_effect=LTIError(err_msg))
with self.assertRaisesRegexp(LTIError, err_msg):
request = Mock(headers={u'Content-Type': u'application/vnd.ims.lis.v2.result+json'})
self.xmodule.verify_lti_2_0_result_rest_headers(request)
def test_lti20_rest_good_headers(self):
"""
Input with good oauth body hash verification
"""
self.xmodule.verify_oauth_body_sign = Mock(return_value=True)
request = Mock(headers={u'Content-Type': u'application/vnd.ims.lis.v2.result+json'})
self.xmodule.verify_lti_2_0_result_rest_headers(request)
# We just want the above call to complete without exceptions, and to have called verify_oauth_body_sign
self.assertTrue(self.xmodule.verify_oauth_body_sign.called)
BAD_DISPATCH_INPUTS = [
None,
u"",
u"abcd"
u"notuser/abcd"
u"user/"
u"user//"
u"user/gbere/"
u"user/gbere/xsdf"
u"user/ಠ益ಠ" # not alphanumeric
]
def test_lti20_rest_bad_dispatch(self):
"""
Test the error cases for the "dispatch" argument to the LTI 2.0 handler. Anything that doesn't
fit the form user/<anon_id>
"""
for einput in self.BAD_DISPATCH_INPUTS:
with self.assertRaisesRegexp(LTIError, "No valid user id found in endpoint URL"):
self.xmodule.parse_lti_2_0_handler_suffix(einput)
GOOD_DISPATCH_INPUTS = [
(u"user/abcd3", u"abcd3"),
(u"user/Äbcdè2", u"Äbcdè2"), # unicode, just to make sure
]
def test_lti20_rest_good_dispatch(self):
"""
Test the good cases for the "dispatch" argument to the LTI 2.0 handler. Anything that does
fit the form user/<anon_id>
"""
for ginput, expected in self.GOOD_DISPATCH_INPUTS:
self.assertEquals(self.xmodule.parse_lti_2_0_handler_suffix(ginput), expected)
BAD_JSON_INPUTS = [
# (bad inputs, error message expected)
([
u"kk", # ValueError
u"{{}", # ValueError
u"{}}", # ValueError
3, # TypeError
{}, # TypeError
], u"Supplied JSON string in request body could not be decoded"),
([
u"3", # valid json, not array or object
u"[]", # valid json, array too small
u"[3, {}]", # valid json, 1st element not an object
], u"Supplied JSON string is a list that does not contain an object as the first element"),
([
u'{"@type": "NOTResult"}', # @type key must have value 'Result'
], u"JSON object does not contain correct @type attribute"),
([
# @context missing
u'{"@type": "Result", "resultScore": 0.1}',
], u"JSON object does not contain required key"),
([
u'''
{"@type": "Result",
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"resultScore": 100}''' # score out of range
], u"score value outside the permitted range of 0-1."),
([
u'''
{"@type": "Result",
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"resultScore": "1b"}''', # score ValueError
u'''
{"@type": "Result",
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"resultScore": {}}''', # score TypeError
], u"Could not convert resultScore to float"),
]
def test_lti20_bad_json(self):
"""
Test that bad json_str to parse_lti_2_0_result_json inputs raise appropriate LTI Error
"""
for error_inputs, error_message in self.BAD_JSON_INPUTS:
for einput in error_inputs:
with self.assertRaisesRegexp(LTIError, error_message):
self.xmodule.parse_lti_2_0_result_json(einput)
GOOD_JSON_INPUTS = [
(u'''
{"@type": "Result",
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"resultScore": 0.1}''', u""), # no comment means we expect ""
(u'''
[{"@type": "Result",
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@id": "anon_id:abcdef0123456789",
"resultScore": 0.1}]''', u""), # OK to have array of objects -- just take the first. @id is okay too
(u'''
{"@type": "Result",
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"resultScore": 0.1,
"comment": "ಠ益ಠ"}''', u"ಠ益ಠ"), # unicode comment
]
def test_lti20_good_json(self):
"""
Test the parsing of good comments
"""
for json_str, expected_comment in self.GOOD_JSON_INPUTS:
score, comment = self.xmodule.parse_lti_2_0_result_json(json_str)
self.assertEqual(score, 0.1)
self.assertEqual(comment, expected_comment)
GOOD_JSON_PUT = textwrap.dedent(u"""
{"@type": "Result",
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@id": "anon_id:abcdef0123456789",
"resultScore": 0.1,
"comment": "ಠ益ಠ"}
""").encode('utf-8')
GOOD_JSON_PUT_LIKE_DELETE = textwrap.dedent(u"""
{"@type": "Result",
"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@id": "anon_id:abcdef0123456789",
"comment": "ಠ益ಠ"}
""").encode('utf-8')
def get_signed_lti20_mock_request(self, body, method=u'PUT'):
"""
Example of signed from LTI 2.0 Provider. Signatures and hashes are example only and won't verify
"""
mock_request = Mock()
mock_request.headers = {
'Content-Type': 'application/vnd.ims.lis.v2.result+json',
'Authorization': (
u'OAuth oauth_nonce="135685044251684026041377608307", '
u'oauth_timestamp="1234567890", oauth_version="1.0", '
u'oauth_signature_method="HMAC-SHA1", '
u'oauth_consumer_key="test_client_key", '
u'oauth_signature="my_signature%3D", '
u'oauth_body_hash="gz+PeJZuF2//n9hNUnDj2v5kN70="'
)
}
mock_request.url = u'http://testurl'
mock_request.http_method = method
mock_request.method = method
mock_request.body = body
return mock_request
USER_STANDIN = Mock()
USER_STANDIN.id = 999
def setup_system_xmodule_mocks_for_lti20_request_test(self):
"""
Helper fn to set up mocking for lti 2.0 request test
"""
self.system.get_real_user = Mock(return_value=self.USER_STANDIN)
self.xmodule.max_score = Mock(return_value=1.0)
self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', u'test_client_secret'))
self.xmodule.verify_oauth_body_sign = Mock()
def test_lti20_put_like_delete_success(self):
"""
The happy path for LTI 2.0 PUT that acts like a delete
"""
self.setup_system_xmodule_mocks_for_lti20_request_test()
SCORE = 0.55 # pylint: disable=invalid-name
COMMENT = u"ಠ益ಠ" # pylint: disable=invalid-name
self.xmodule.module_score = SCORE
self.xmodule.score_comment = COMMENT
mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT_LIKE_DELETE)
# Now call the handler
response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
# Now assert there's no score
self.assertEqual(response.status_code, 200)
self.assertIsNone(self.xmodule.module_score)
self.assertEqual(self.xmodule.score_comment, u"")
(_, evt_type, called_grade_obj), _ = self.system.publish.call_args
self.assertEqual(called_grade_obj, {'user_id': self.USER_STANDIN.id, 'value': None, 'max_value': None})
self.assertEqual(evt_type, 'grade')
def test_lti20_delete_success(self):
"""
The happy path for LTI 2.0 DELETE
"""
self.setup_system_xmodule_mocks_for_lti20_request_test()
SCORE = 0.55 # pylint: disable=invalid-name
COMMENT = u"ಠ益ಠ" # pylint: disable=invalid-name
self.xmodule.module_score = SCORE
self.xmodule.score_comment = COMMENT
mock_request = self.get_signed_lti20_mock_request("", method=u'DELETE')
# Now call the handler
response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
# Now assert there's no score
self.assertEqual(response.status_code, 200)
self.assertIsNone(self.xmodule.module_score)
self.assertEqual(self.xmodule.score_comment, u"")
(_, evt_type, called_grade_obj), _ = self.system.publish.call_args
self.assertEqual(called_grade_obj, {'user_id': self.USER_STANDIN.id, 'value': None, 'max_value': None})
self.assertEqual(evt_type, 'grade')
def test_lti20_put_set_score_success(self):
"""
The happy path for LTI 2.0 PUT that sets a score
"""
self.setup_system_xmodule_mocks_for_lti20_request_test()
mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
# Now call the handler
response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
# Now assert
self.assertEqual(response.status_code, 200)
self.assertEqual(self.xmodule.module_score, 0.1)
self.assertEqual(self.xmodule.score_comment, u"ಠ益ಠ")
(_, evt_type, called_grade_obj), _ = self.system.publish.call_args
self.assertEqual(evt_type, 'grade')
self.assertEqual(called_grade_obj, {'user_id': self.USER_STANDIN.id, 'value': 0.1, 'max_value': 1.0})
def test_lti20_get_no_score_success(self):
"""
The happy path for LTI 2.0 GET when there's no score
"""
self.setup_system_xmodule_mocks_for_lti20_request_test()
mock_request = self.get_signed_lti20_mock_request("", method=u'GET')
# Now call the handler
response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
# Now assert
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type": "Result"})
def test_lti20_get_with_score_success(self):
"""
The happy path for LTI 2.0 GET when there is a score
"""
self.setup_system_xmodule_mocks_for_lti20_request_test()
SCORE = 0.55 # pylint: disable=invalid-name
COMMENT = u"ಠ益ಠ" # pylint: disable=invalid-name
self.xmodule.module_score = SCORE
self.xmodule.score_comment = COMMENT
mock_request = self.get_signed_lti20_mock_request("", method=u'GET')
# Now call the handler
response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
# Now assert
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {"@context": "http://purl.imsglobal.org/ctx/lis/v2/Result",
"@type": "Result",
"resultScore": SCORE,
"comment": COMMENT})
UNSUPPORTED_HTTP_METHODS = ["OPTIONS", "HEAD", "POST", "TRACE", "CONNECT"]
def test_lti20_unsupported_method_error(self):
"""
Test we get a 404 when we don't GET or PUT
"""
self.setup_system_xmodule_mocks_for_lti20_request_test()
mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
for bad_method in self.UNSUPPORTED_HTTP_METHODS:
mock_request.method = bad_method
response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
self.assertEqual(response.status_code, 404)
def test_lti20_request_handler_bad_headers(self):
"""
Test that we get a 401 when header verification fails
"""
self.setup_system_xmodule_mocks_for_lti20_request_test()
self.xmodule.verify_lti_2_0_result_rest_headers = Mock(side_effect=LTIError())
mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
self.assertEqual(response.status_code, 401)
def test_lti20_request_handler_bad_dispatch_user(self):
"""
Test that we get a 404 when there's no (or badly formatted) user specified in the url
"""
self.setup_system_xmodule_mocks_for_lti20_request_test()
mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
response = self.xmodule.lti_2_0_result_rest_handler(mock_request, None)
self.assertEqual(response.status_code, 404)
def test_lti20_request_handler_bad_json(self):
"""
Test that we get a 404 when json verification fails
"""
self.setup_system_xmodule_mocks_for_lti20_request_test()
self.xmodule.parse_lti_2_0_result_json = Mock(side_effect=LTIError())
mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
response = self.xmodule.lti_2_0_result_rest_handler(mock_request, "user/abcd")
self.assertEqual(response.status_code, 404)
def test_lti20_request_handler_bad_user(self):
    """
    Requests naming a user that cannot be resolved must produce a 404.
    """
    self.setup_system_xmodule_mocks_for_lti20_request_test()
    self.system.get_real_user = Mock(return_value=None)
    request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
    status = self.xmodule.lti_2_0_result_rest_handler(request, "user/abcd").status_code
    self.assertEqual(status, 404)
def test_lti20_request_handler_grade_past_due(self):
    """
    Grades posted after the due date are rejected with a 404 when
    accept_grades_past_due is disabled.
    """
    self.setup_system_xmodule_mocks_for_lti20_request_test()
    self.xmodule.due = datetime.datetime.now(UTC())
    self.xmodule.accept_grades_past_due = False
    request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)
    status = self.xmodule.lti_2_0_result_rest_handler(request, "user/abcd").status_code
    self.assertEqual(status, 404)
|
jbassen/edx-platform
|
common/lib/xmodule/xmodule/tests/test_lti20_unit.py
|
Python
|
agpl-3.0
| 17,256
|
# Register the app-local SocialFriendList model with the default Django
# admin site so its records can be browsed and edited in the admin UI.
import models  # implicit-relative, Python 2 style app-local import
from django.contrib import admin

admin.site.register(models.SocialFriendList)
|
soplerproject/sopler
|
social_friends_finder/admin.py
|
Python
|
agpl-3.0
| 93
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
import re
import time
from _common import ceiling
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import psycopg2
import openerp.addons.decimal_precision as dp
def ean_checksum(eancode):
    """Return the EAN-13 check digit for ``eancode``.

    ``eancode`` must be a string of exactly 13 characters; the 13th
    character (the embedded check digit) is ignored when computing.
    Returns -1 when the string does not have exactly 13 characters.
    """
    if len(eancode) != 13:
        return -1
    # Walk the 12 payload digits right-to-left (check digit dropped):
    # GS1 weights alternate 3, 1, 3, 1, ... starting from the rightmost.
    digits = [int(d) for d in eancode[-2::-1]]
    total = 3 * sum(digits[::2]) + sum(digits[1::2])
    # The check digit is whatever brings `total` up to a multiple of 10.
    return (10 - total % 10) % 10
def check_ean(eancode):
    """Validate an EAN-13 barcode string.

    Returns True for a falsy value (an unset barcode is considered
    valid) or when the 13-digit string's last digit matches its
    computed checksum; False otherwise.
    """
    if not eancode:
        return True
    if len(eancode) != 13:
        return False
    try:
        int(eancode)
    # Narrowed from a bare `except:`; only non-numeric content can fail here
    # and we must not swallow unrelated errors (KeyboardInterrupt, etc.).
    except ValueError:
        return False
    return ean_checksum(eancode) == int(eancode[-1])
def sanitize_ean13(ean13):
    """Coerce arbitrary input into a valid EAN-13 string.

    Letters are mapped to '0', any other non-digit character is
    stripped, the result is truncated/zero-padded to 13 characters,
    and the last character is replaced by the correct check digit.
    Falsy input yields the all-zero (valid) barcode.
    """
    if not ean13:
        return "0000000000000"
    ean13 = re.sub("[A-Za-z]", "0", ean13)
    ean13 = re.sub("[^0-9]", "", ean13)
    # Exactly 13 characters: truncate, then right-pad with zeros.
    ean13 = ean13[:13].ljust(13, '0')
    return ean13[:-1] + str(ean_checksum(ean13))
#----------------------------------------------------------
# UOM
#----------------------------------------------------------
class product_uom_categ(osv.osv):
    # Unit-of-measure category. Conversion between two UoMs is only
    # permitted when they share a category (enforced in
    # product_uom._compute_qty_obj below).
    _name = 'product.uom.categ'
    _description = 'Product uom categ'
    _columns = {
        'name': fields.char('Name', required=True, translate=True),
    }
class product_uom(osv.osv):
    _name = 'product.uom'
    _description = 'Product Unit of Measure'

    def _compute_factor_inv(self, factor):
        # Inverse ratio; the and/or guards against division by zero
        # when `factor` is falsy (returns 0.0 instead of raising).
        return factor and (1.0 / factor) or 0.0

    def _factor_inv(self, cursor, user, ids, name, arg, context=None):
        # Function-field getter for 'factor_inv': {uom_id: 1/factor}.
        res = {}
        for uom in self.browse(cursor, user, ids, context=context):
            res[uom.id] = self._compute_factor_inv(uom.factor)
        return res

    def _factor_inv_write(self, cursor, user, id, name, value, arg, context=None):
        # Function-field setter: store the inverse back into 'factor'.
        return self.write(cursor, user, id, {'factor': self._compute_factor_inv(value)}, context=context)

    def name_create(self, cr, uid, name, context=None):
        """ The UoM category and factor are required, so we'll have to add temporary values
        for imported UoMs """
        uom_categ = self.pool.get('product.uom.categ')
        # look for the category based on the english name, i.e. no context on purpose!
        # TODO: should find a way to have it translated but not created until actually used
        categ_misc = 'Unsorted/Imported Units'
        categ_id = uom_categ.search(cr, uid, [('name', '=', categ_misc)])
        if categ_id:
            categ_id = categ_id[0]
        else:
            categ_id, _ = uom_categ.name_create(cr, uid, categ_misc)
        uom_id = self.create(cr, uid, {self._rec_name: name,
                                       'category_id': categ_id,
                                       'factor': 1})
        return self.name_get(cr, uid, [uom_id], context=context)[0]

    def create(self, cr, uid, data, context=None):
        # Accept 'factor_inv' from callers/imports and translate it into
        # 'factor' (the stored field) before delegating to the ORM.
        if 'factor_inv' in data:
            if data['factor_inv'] != 1:
                data['factor'] = self._compute_factor_inv(data['factor_inv'])
            del(data['factor_inv'])
        return super(product_uom, self).create(cr, uid, data, context)

    _order = "name"
    _columns = {
        'name': fields.char('Unit of Measure', required=True, translate=True),
        'category_id': fields.many2one('product.uom.categ', 'Product Category', required=True, ondelete='cascade',
            help="Conversion between Units of Measure can only occur if they belong to the same category. The conversion will be made based on the ratios."),
        'factor': fields.float('Ratio', required=True, digits=(12, 12),
            help='How much bigger or smaller this unit is compared to the reference Unit of Measure for this category:\n'\
                 '1 * (reference unit) = ratio * (this unit)'),
        'factor_inv': fields.function(_factor_inv, digits=(12,12),
            fnct_inv=_factor_inv_write,
            string='Bigger Ratio',
            help='How many times this Unit of Measure is bigger than the reference Unit of Measure in this category:\n'\
                 '1 * (this unit) = ratio * (reference unit)', required=True),
        'rounding': fields.float('Rounding Precision', digits_compute=dp.get_precision('Product Unit of Measure'), required=True,
            help="The computed quantity will be a multiple of this value. "\
                 "Use 1.0 for a Unit of Measure that cannot be further split, such as a piece."),
        'active': fields.boolean('Active', help="By unchecking the active field you can disable a unit of measure without deleting it."),
        'uom_type': fields.selection([('bigger','Bigger than the reference Unit of Measure'),
                                      ('reference','Reference Unit of Measure for this category'),
                                      ('smaller','Smaller than the reference Unit of Measure')],'Type', required=1),
    }

    _defaults = {
        'active': 1,
        'rounding': 0.01,
        'uom_type': 'reference',
    }

    _sql_constraints = [
        ('factor_gt_zero', 'CHECK (factor!=0)', 'The conversion ratio for a unit of measure cannot be 0!')
    ]

    def _compute_qty(self, cr, uid, from_uom_id, qty, to_uom_id=False, round=True):
        # Convert `qty` between two UoM ids; returns `qty` unchanged when
        # either id or the quantity itself is missing/zero.
        if not from_uom_id or not qty or not to_uom_id:
            return qty
        uoms = self.browse(cr, uid, [from_uom_id, to_uom_id])
        # browse() does not guarantee order; re-associate ids with records.
        if uoms[0].id == from_uom_id:
            from_unit, to_unit = uoms[0], uoms[-1]
        else:
            from_unit, to_unit = uoms[-1], uoms[0]
        return self._compute_qty_obj(cr, uid, from_unit, qty, to_unit, round=round)

    def _compute_qty_obj(self, cr, uid, from_unit, qty, to_unit, round=True, context=None):
        # Record-based conversion. Cross-category conversion raises unless
        # the caller opts out via context['raise-exception'] = False.
        if context is None:
            context = {}
        if from_unit.category_id.id != to_unit.category_id.id:
            if context.get('raise-exception', True):
                raise osv.except_osv(_('Error!'), _('Conversion from Product UoM %s to Default UoM %s is not possible as they both belong to different Category!.') % (from_unit.name,to_unit.name,))
            else:
                return qty
        # Go through the category's reference unit, then round up to the
        # target unit's rounding precision.
        amount = qty / from_unit.factor
        if to_unit:
            amount = amount * to_unit.factor
            if round:
                amount = ceiling(amount, to_unit.rounding)
        return amount

    def _compute_price(self, cr, uid, from_uom_id, price, to_uom_id=False):
        # Convert a unit price between UoMs (inverse direction of quantity
        # conversion); silently returns `price` on cross-category ids.
        if not from_uom_id or not price or not to_uom_id:
            return price
        from_unit, to_unit = self.browse(cr, uid, [from_uom_id, to_uom_id])
        if from_unit.category_id.id != to_unit.category_id.id:
            return price
        amount = price * from_unit.factor
        if to_uom_id:
            amount = amount / to_unit.factor
        return amount

    def onchange_type(self, cursor, user, ids, value):
        # A reference UoM always has ratio 1 by definition.
        if value == 'reference':
            return {'value': {'factor': 1, 'factor_inv': 1}}
        return {}

    def write(self, cr, uid, ids, vals, context=None):
        # Forbid moving an existing UoM to another category: that would
        # silently change the meaning of every stored quantity using it.
        if isinstance(ids, (int, long)):
            ids = [ids]
        if 'category_id' in vals:
            for uom in self.browse(cr, uid, ids, context=context):
                if uom.category_id.id != vals['category_id']:
                    raise osv.except_osv(_('Warning!'),_("Cannot change the category of existing Unit of Measure '%s'.") % (uom.name,))
        return super(product_uom, self).write(cr, uid, ids, vals, context=context)
class product_ul(osv.osv):
    # Logistic unit: describes how products are physically packaged
    # (unit, pack, box, pallet) together with its dimensions and weight.
    _name = "product.ul"
    _description = "Logistic Unit"
    _columns = {
        'name' : fields.char('Name', select=True, required=True, translate=True),
        'type' : fields.selection([('unit','Unit'),('pack','Pack'),('box', 'Box'), ('pallet', 'Pallet')], 'Type', required=True),
        'height': fields.float('Height', help='The height of the package'),
        'width': fields.float('Width', help='The width of the package'),
        'length': fields.float('Length', help='The length of the package'),
        'weight': fields.float('Empty Package Weight'),
    }
#----------------------------------------------------------
# Categories
#----------------------------------------------------------
class product_category(osv.osv):
    # Hierarchical product category, stored with the nested-set model
    # (parent_left/parent_right) for fast subtree queries.

    def name_get(self, cr, uid, ids, context=None):
        # Display names include the parent, e.g. "All / Saleable".
        if isinstance(ids, (list, tuple)) and not len(ids):
            return []
        if isinstance(ids, (long, int)):
            ids = [ids]
        reads = self.read(cr, uid, ids, ['name','parent_id'], context=context)
        res = []
        for record in reads:
            name = record['name']
            if record['parent_id']:
                name = record['parent_id'][1]+' / '+name
            res.append((record['id'], name))
        return res

    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        if not args:
            args = []
        if not context:
            context = {}
        if name:
            # Be sure name_search is symmetric to name_get: match only on
            # the last path component of "Parent / Child" display names.
            name = name.split(' / ')[-1]
            ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
        else:
            ids = self.search(cr, uid, args, limit=limit, context=context)
        return self.name_get(cr, uid, ids, context)

    def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
        # Function-field getter backing 'complete_name'.
        res = self.name_get(cr, uid, ids, context=context)
        return dict(res)

    _name = "product.category"
    _description = "Product Category"
    _columns = {
        'name': fields.char('Name', required=True, translate=True, select=True),
        'complete_name': fields.function(_name_get_fnc, type="char", string='Name'),
        'parent_id': fields.many2one('product.category','Parent Category', select=True, ondelete='cascade'),
        'child_id': fields.one2many('product.category', 'parent_id', string='Child Categories'),
        'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of product categories."),
        'type': fields.selection([('view','View'), ('normal','Normal')], 'Category Type', help="A category of the view type is a virtual category that can be used as the parent of another category to create a hierarchical structure."),
        'parent_left': fields.integer('Left Parent', select=1),
        'parent_right': fields.integer('Right Parent', select=1),
    }

    _defaults = {
        'type' : 'normal',
    }

    _parent_name = "parent_id"
    _parent_store = True
    _parent_order = 'sequence, name'
    _order = 'parent_left'

    _constraints = [
        (osv.osv._check_recursion, 'Error ! You cannot create recursive categories.', ['parent_id'])
    ]
class produce_price_history(osv.osv):
    """
    Keep track of the ``product.template`` standard prices as they are changed.
    """
    _name = 'product.price.history'
    _rec_name = 'datetime'
    _order = 'datetime desc'

    _columns = {
        'company_id': fields.many2one('res.company', required=True),
        'product_template_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
        'datetime': fields.datetime('Historization Time'),
        'cost': fields.float('Historized Cost'),
    }

    def _get_default_company(self, cr, uid, context=None):
        """Default company: honour context['force_company'] when set,
        otherwise fall back to the current user's company (or False)."""
        # Guard: defaults can be computed from code paths that pass
        # context=None; the original crashed on `'force_company' in None`.
        if context is None:
            context = {}
        if 'force_company' in context:
            return context['force_company']
        company = self.pool['res.users'].browse(cr, uid, uid,
            context=context).company_id
        return company.id if company else False

    _defaults = {
        'datetime': fields.datetime.now,
        'company_id': _get_default_company,
    }
#----------------------------------------------------------
# Product Attributes
#----------------------------------------------------------
class product_attribute(osv.osv):
    # A variant axis (e.g. "Color"); its values generate product variants.
    _name = "product.attribute"
    _description = "Product Attribute"
    _columns = {
        'name': fields.char('Name', translate=True, required=True),
        'value_ids': fields.one2many('product.attribute.value', 'attribute_id', 'Values', copy=True),
    }
class product_attribute_value(osv.osv):
    """A single value of a product attribute (e.g. "Red" for "Color"),
    optionally carrying a per-template price surcharge (price_extra)."""
    _name = "product.attribute.value"
    _order = 'sequence'

    def _get_price_extra(self, cr, uid, ids, name, args, context=None):
        """Function-field getter: the price_extra applying to the product
        template identified by context['active_id'] (0 when absent)."""
        result = dict.fromkeys(ids, 0)
        # Guard: sibling _set_price_extra already tolerates context=None,
        # this getter did not and crashed on None.get('active_id').
        if context is None:
            context = {}
        if not context.get('active_id'):
            return result
        for obj in self.browse(cr, uid, ids, context=context):
            for price_id in obj.price_ids:
                if price_id.product_tmpl_id.id == context.get('active_id'):
                    result[obj.id] = price_id.price_extra
                    break
        return result

    def _set_price_extra(self, cr, uid, id, name, value, args, context=None):
        """Function-field setter: create or update the
        product.attribute.price row for the template in context['active_id']."""
        if context is None:
            context = {}
        if 'active_id' not in context:
            return None
        p_obj = self.pool['product.attribute.price']
        p_ids = p_obj.search(cr, uid, [('value_id', '=', id), ('product_tmpl_id', '=', context['active_id'])], context=context)
        if p_ids:
            p_obj.write(cr, uid, p_ids, {'price_extra': value}, context=context)
        else:
            p_obj.create(cr, uid, {
                'product_tmpl_id': context['active_id'],
                'value_id': id,
                'price_extra': value,
            }, context=context)

    _columns = {
        'sequence': fields.integer('Sequence', help="Determine the display order"),
        'name': fields.char('Value', translate=True, required=True),
        'attribute_id': fields.many2one('product.attribute', 'Attribute', required=True, ondelete='cascade'),
        'product_ids': fields.many2many('product.product', id1='att_id', id2='prod_id', string='Variants', readonly=True),
        'price_extra': fields.function(_get_price_extra, type='float', string='Attribute Price Extra',
                                       fnct_inv=_set_price_extra,
                                       digits_compute=dp.get_precision('Product Price'),
                                       help="Price Extra: Extra price for the variant with this attribute value on sale price. eg. 200 price extra, 1000 + 200 = 1200."),
        'price_ids': fields.one2many('product.attribute.price', 'value_id', string='Attribute Prices', readonly=True),
    }

    _sql_constraints = [
        ('value_company_uniq', 'unique (name,attribute_id)', 'This attribute value already exists !')
    ]

    _defaults = {
        'price_extra': 0.0,
    }

    def unlink(self, cr, uid, ids, context=None):
        """Refuse deletion while any (even archived) variant still
        references one of these values."""
        ctx = dict(context or {}, active_test=False)
        product_ids = self.pool['product.product'].search(cr, uid, [('attribute_value_ids', 'in', ids)], context=ctx)
        if product_ids:
            raise osv.except_osv(_('Integrity Error!'), _('The operation cannot be completed:\nYou trying to delete an attribute value with a reference on a product variant.'))
        return super(product_attribute_value, self).unlink(cr, uid, ids, context=context)
class product_attribute_price(osv.osv):
    # Per-(template, attribute value) price surcharge backing the
    # product.attribute.value 'price_extra' function field.
    _name = "product.attribute.price"
    _columns = {
        'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
        'value_id': fields.many2one('product.attribute.value', 'Product Attribute Value', required=True, ondelete='cascade'),
        'price_extra': fields.float('Price Extra', digits_compute=dp.get_precision('Product Price')),
    }
class product_attribute_line(osv.osv):
    # Associates one attribute (and its chosen values) with a product
    # template; these lines drive variant generation in
    # product_template.create_variant_ids.
    _name = "product.attribute.line"
    _rec_name = 'attribute_id'
    _columns = {
        'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade'),
        'attribute_id': fields.many2one('product.attribute', 'Attribute', required=True, ondelete='restrict'),
        'value_ids': fields.many2many('product.attribute.value', id1='line_id', id2='val_id', string='Product Attribute Value'),
    }
#----------------------------------------------------------
# Products
#----------------------------------------------------------
class product_template(osv.osv):
_name = "product.template"
_inherit = ['mail.thread']
_description = "Product Template"
_order = "name"
def _get_image(self, cr, uid, ids, name, args, context=None):
result = dict.fromkeys(ids, False)
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image, avoid_resize_medium=True)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
def _is_product_variant(self, cr, uid, ids, name, arg, context=None):
return self._is_product_variant_impl(cr, uid, ids, name, arg, context=context)
def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
return dict.fromkeys(ids, False)
def _product_template_price(self, cr, uid, ids, name, arg, context=None):
plobj = self.pool.get('product.pricelist')
res = {}
quantity = context.get('quantity') or 1.0
pricelist = context.get('pricelist', False)
partner = context.get('partner', False)
if pricelist:
# Support context pricelists specified as display_name or ID for compatibility
if isinstance(pricelist, basestring):
pricelist_ids = plobj.name_search(
cr, uid, pricelist, operator='=', context=context, limit=1)
pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist
if isinstance(pricelist, (int, long)):
products = self.browse(cr, uid, ids, context=context)
qtys = map(lambda x: (x, quantity, partner), products)
pl = plobj.browse(cr, uid, pricelist, context=context)
price = plobj._price_get_multi(cr,uid, pl, qtys, context=context)
for id in ids:
res[id] = price.get(id, 0.0)
for id in ids:
res.setdefault(id, 0.0)
return res
def get_history_price(self, cr, uid, product_tmpl, company_id, date=None, context=None):
if context is None:
context = {}
if date is None:
date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
price_history_obj = self.pool.get('product.price.history')
history_ids = price_history_obj.search(cr, uid, [('company_id', '=', company_id), ('product_template_id', '=', product_tmpl), ('datetime', '<=', date)], limit=1)
if history_ids:
return price_history_obj.read(cr, uid, history_ids[0], ['cost'], context=context)['cost']
return 0.0
def _set_standard_price(self, cr, uid, product_tmpl_id, value, context=None):
''' Store the standard price change in order to be able to retrieve the cost of a product template for a given date'''
if context is None:
context = {}
price_history_obj = self.pool['product.price.history']
user_company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
company_id = context.get('force_company', user_company)
price_history_obj.create(cr, uid, {
'product_template_id': product_tmpl_id,
'cost': value,
'company_id': company_id,
}, context=context)
def _get_product_variant_count(self, cr, uid, ids, name, arg, context=None):
res = {}
for product in self.browse(cr, uid, ids):
res[product.id] = len(product.product_variant_ids)
return res
_columns = {
'name': fields.char('Name', required=True, translate=True, select=True),
'product_manager': fields.many2one('res.users','Product Manager'),
'description': fields.text('Description',translate=True,
help="A precise description of the Product, used only for internal information purposes."),
'description_purchase': fields.text('Purchase Description',translate=True,
help="A description of the Product that you want to communicate to your suppliers. "
"This description will be copied to every Purchase Order, Receipt and Supplier Invoice/Refund."),
'description_sale': fields.text('Sale Description',translate=True,
help="A description of the Product that you want to communicate to your customers. "
"This description will be copied to every Sale Order, Delivery Order and Customer Invoice/Refund"),
'type': fields.selection([('consu', 'Consumable'),('service','Service')], 'Product Type', required=True, help="Consumable are product where you don't manage stock, a service is a non-material product provided by a company or an individual."),
'rental': fields.boolean('Can be Rent'),
'categ_id': fields.many2one('product.category','Internal Category', required=True, change_default=True, domain="[('type','=','normal')]" ,help="Select category for the current product"),
'price': fields.function(_product_template_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
'list_price': fields.float('Sale Price', digits_compute=dp.get_precision('Product Price'), help="Base price to compute the customer price. Sometimes called the catalog price."),
'lst_price' : fields.related('list_price', type="float", string='Public Price', digits_compute=dp.get_precision('Product Price')),
'standard_price': fields.property(type = 'float', digits_compute=dp.get_precision('Product Price'),
help="Cost price of the product template used for standard stock valuation in accounting and used as a base price on purchase orders.",
groups="base.group_user", string="Cost Price"),
'volume': fields.float('Volume', help="The volume in m3."),
'weight': fields.float('Gross Weight', digits_compute=dp.get_precision('Stock Weight'), help="The gross weight in Kg."),
'weight_net': fields.float('Net Weight', digits_compute=dp.get_precision('Stock Weight'), help="The net weight in Kg."),
'warranty': fields.float('Warranty'),
'sale_ok': fields.boolean('Can be Sold', help="Specify if the product can be selected in a sales order line."),
'pricelist_id': fields.dummy(string='Pricelist', relation='product.pricelist', type='many2one'),
'state': fields.selection([('',''),
('draft', 'In Development'),
('sellable','Normal'),
('end','End of Lifecycle'),
('obsolete','Obsolete')], 'Status'),
'uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True, help="Default Unit of Measure used for all stock operation."),
'uom_po_id': fields.many2one('product.uom', 'Purchase Unit of Measure', required=True, help="Default Unit of Measure used for purchase orders. It must be in the same category than the default unit of measure."),
'uos_id' : fields.many2one('product.uom', 'Unit of Sale',
help='Specify a unit of measure here if invoicing is made in another unit of measure than inventory. Keep empty to use the default unit of measure.'),
'uos_coeff': fields.float('Unit of Measure -> UOS Coeff', digits_compute= dp.get_precision('Product UoS'),
help='Coefficient to convert default Unit of Measure to Unit of Sale\n'
' uos = uom * coeff'),
'mes_type': fields.selection((('fixed', 'Fixed'), ('variable', 'Variable')), 'Measure Type'),
'company_id': fields.many2one('res.company', 'Company', select=1),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as image for the product, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of the product. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved, "\
"only when the image exceeds one of those sizes. Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized image", type="binary", multi="_get_image",
store={
'product.template': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of the product. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'packaging_ids': fields.one2many(
'product.packaging', 'product_tmpl_id', 'Logistical Units',
help="Gives the different ways to package the same product. This has no impact on "
"the picking order and is mainly used if you use the EDI module."),
'seller_ids': fields.one2many('product.supplierinfo', 'product_tmpl_id', 'Supplier'),
'seller_delay': fields.related('seller_ids','delay', type='integer', string='Supplier Lead Time',
help="This is the average delay in days between the purchase order confirmation and the receipts for this product and for the default supplier. It is used by the scheduler to order requests based on reordering delays."),
'seller_qty': fields.related('seller_ids','qty', type='float', string='Supplier Quantity',
help="This is minimum quantity to purchase from Main Supplier."),
'seller_id': fields.related('seller_ids','name', type='many2one', relation='res.partner', string='Main Supplier',
help="Main Supplier who has highest priority in Supplier List."),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
'color': fields.integer('Color Index'),
'is_product_variant': fields.function( _is_product_variant, type='boolean', string='Only one product variant'),
'attribute_line_ids': fields.one2many('product.attribute.line', 'product_tmpl_id', 'Product Attributes'),
'product_variant_ids': fields.one2many('product.product', 'product_tmpl_id', 'Products', required=True),
'product_variant_count': fields.function( _get_product_variant_count, type='integer', string='# of Product Variants'),
# related to display product product information if is_product_variant
'ean13': fields.related('product_variant_ids', 'ean13', type='char', string='EAN13 Barcode'),
'default_code': fields.related('product_variant_ids', 'default_code', type='char', string='Internal Reference'),
}
def _price_get_list_price(self, product):
return 0.0
def _price_get(self, cr, uid, products, ptype='list_price', context=None):
if context is None:
context = {}
if 'currency_id' in context:
pricetype_obj = self.pool.get('product.price.type')
price_type_id = pricetype_obj.search(cr, uid, [('field','=',ptype)])[0]
price_type_currency_id = pricetype_obj.browse(cr,uid,price_type_id).currency_id.id
res = {}
product_uom_obj = self.pool.get('product.uom')
for product in products:
res[product.id] = product[ptype] or 0.0
if ptype == 'list_price':
res[product.id] += product._name == "product.product" and product.price_extra or 0.0
if 'uom' in context:
uom = product.uom_id or product.uos_id
res[product.id] = product_uom_obj._compute_price(cr, uid,
uom.id, res[product.id], context['uom'])
# Convert from price_type currency to asked one
if 'currency_id' in context:
# Take the price_type currency from the product field
# This is right cause a field cannot be in more than one currency
res[product.id] = self.pool.get('res.currency').compute(cr, uid, price_type_currency_id,
context['currency_id'], res[product.id],context=context)
return res
def _get_uom_id(self, cr, uid, *args):
return self.pool["product.uom"].search(cr, uid, [], limit=1, order='id')[0]
def _default_category(self, cr, uid, context=None):
if context is None:
context = {}
if 'categ_id' in context and context['categ_id']:
return context['categ_id']
md = self.pool.get('ir.model.data')
res = False
try:
res = md.get_object_reference(cr, uid, 'product', 'product_category_all')[1]
except ValueError:
res = False
return res
def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
if uom_id:
return {'value': {'uom_po_id': uom_id}}
return {}
def create_variant_ids(self, cr, uid, ids, context=None):
product_obj = self.pool.get("product.product")
ctx = context and context.copy() or {}
if ctx.get("create_product_variant"):
return None
ctx.update(active_test=False, create_product_variant=True)
tmpl_ids = self.browse(cr, uid, ids, context=ctx)
for tmpl_id in tmpl_ids:
# list of values combination
all_variants = [[]]
for variant_id in tmpl_id.attribute_line_ids:
if len(variant_id.value_ids) > 1:
temp_variants = []
for value_id in variant_id.value_ids:
for variant in all_variants:
temp_variants.append(variant + [int(value_id)])
all_variants = temp_variants
# check product
variant_ids_to_active = []
variants_active_ids = []
variants_inactive = []
for product_id in tmpl_id.product_variant_ids:
variants = map(int,product_id.attribute_value_ids)
if variants in all_variants:
variants_active_ids.append(product_id.id)
all_variants.pop(all_variants.index(variants))
if not product_id.active:
variant_ids_to_active.append(product_id.id)
else:
variants_inactive.append(product_id)
if variant_ids_to_active:
product_obj.write(cr, uid, variant_ids_to_active, {'active': True}, context=ctx)
# create new product
for variant_ids in all_variants:
values = {
'product_tmpl_id': tmpl_id.id,
'attribute_value_ids': [(6, 0, variant_ids)]
}
id = product_obj.create(cr, uid, values, context=ctx)
variants_active_ids.append(id)
# unlink or inactive product
for variant_id in map(int,variants_inactive):
try:
with cr.savepoint():
product_obj.unlink(cr, uid, [variant_id], context=ctx)
except (psycopg2.Error, osv.except_osv):
product_obj.write(cr, uid, [variant_id], {'active': False}, context=ctx)
pass
return True
def create(self, cr, uid, vals, context=None):
''' Store the initial standard price in order to be able to retrieve the cost of a product template for a given date'''
product_template_id = super(product_template, self).create(cr, uid, vals, context=context)
if not context or "create_product_product" not in context:
self.create_variant_ids(cr, uid, [product_template_id], context=context)
self._set_standard_price(cr, uid, product_template_id, vals.get('standard_price', 0.0), context=context)
# TODO: this is needed to set given values to first variant after creation
# these fields should be moved to product as lead to confusion
related_vals = {}
if vals.get('ean13'):
related_vals['ean13'] = vals['ean13']
if vals.get('default_code'):
related_vals['default_code'] = vals['default_code']
if related_vals:
self.write(cr, uid, product_template_id, related_vals, context=context)
return product_template_id
def write(self, cr, uid, ids, vals, context=None):
''' Store the standard price change in order to be able to retrieve the cost of a product template for a given date'''
if isinstance(ids, (int, long)):
ids = [ids]
if 'uom_po_id' in vals:
new_uom = self.pool.get('product.uom').browse(cr, uid, vals['uom_po_id'], context=context)
for product in self.browse(cr, uid, ids, context=context):
old_uom = product.uom_po_id
if old_uom.category_id.id != new_uom.category_id.id:
raise osv.except_osv(_('Unit of Measure categories Mismatch!'), _("New Unit of Measure '%s' must belong to same Unit of Measure category '%s' as of old Unit of Measure '%s'. If you need to change the unit of measure, you may deactivate this product from the 'Procurements' tab and create a new one.") % (new_uom.name, old_uom.category_id.name, old_uom.name,))
if 'standard_price' in vals:
for prod_template_id in ids:
self._set_standard_price(cr, uid, prod_template_id, vals['standard_price'], context=context)
res = super(product_template, self).write(cr, uid, ids, vals, context=context)
if 'attribute_line_ids' in vals or vals.get('active'):
self.create_variant_ids(cr, uid, ids, context=context)
if 'active' in vals and not vals.get('active'):
ctx = context and context.copy() or {}
ctx.update(active_test=False)
product_ids = []
for product in self.browse(cr, uid, ids, context=ctx):
product_ids = map(int,product.product_variant_ids)
self.pool.get("product.product").write(cr, uid, product_ids, {'active': vals.get('active')}, context=ctx)
return res
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
template = self.browse(cr, uid, id, context=context)
default['name'] = _("%s (copy)") % (template['name'])
return super(product_template, self).copy(cr, uid, id, default=default, context=context)
_defaults = {
'company_id': lambda s,cr,uid,c: s.pool.get('res.company')._company_default_get(cr, uid, 'product.template', context=c),
'list_price': 1,
'standard_price': 0.0,
'sale_ok': 1,
'uom_id': _get_uom_id,
'uom_po_id': _get_uom_id,
'uos_coeff': 1.0,
'mes_type': 'fixed',
'categ_id' : _default_category,
'type' : 'consu',
'active': True,
}
    def _check_uom(self, cursor, user, ids, context=None):
        # Constraint helper: default UoM and purchase UoM must share a category.
        for product in self.browse(cursor, user, ids, context=context):
            if product.uom_id.category_id.id != product.uom_po_id.category_id.id:
                return False
        return True

    def _check_uos(self, cursor, user, ids, context=None):
        # NOTE(review): defined but NOT registered in _constraints below.
        # As written it rejects products whose Unit of Sale is in the SAME
        # category as the default UoM -- presumably UoS is only meaningful
        # across categories; confirm intent before wiring it up.
        for product in self.browse(cursor, user, ids, context=context):
            if product.uos_id \
                    and product.uos_id.category_id.id \
                    == product.uom_id.category_id.id:
                return False
        return True

    # Only the UoM-category check is actually enforced.
    _constraints = [
        (_check_uom, 'Error: The default Unit of Measure and the purchase Unit of Measure must be in the same category.', ['uom_id']),
    ]
def name_get(self, cr, user, ids, context=None):
if context is None:
context = {}
if 'partner_id' in context:
pass
return super(product_template, self).name_get(cr, user, ids, context)
class product_product(osv.osv):
    """Product variant. Inherits (delegation) from product.template; one
    template owns one or more variants distinguished by attribute values."""
    _name = "product.product"
    _description = "Product"
    _inherits = {'product.template': 'product_tmpl_id'}
    _inherit = ['mail.thread']
    _order = 'default_code,name_template'

    def _product_price(self, cr, uid, ids, name, arg, context=None):
        """Function field: price of each product according to the pricelist,
        quantity and partner found in the context (0.0 when unpriced)."""
        plobj = self.pool.get('product.pricelist')
        res = {}
        if context is None:
            context = {}
        quantity = context.get('quantity') or 1.0
        pricelist = context.get('pricelist', False)
        partner = context.get('partner', False)
        if pricelist:
            # Support context pricelists specified as display_name or ID for compatibility
            if isinstance(pricelist, basestring):
                pricelist_ids = plobj.name_search(
                    cr, uid, pricelist, operator='=', context=context, limit=1)
                pricelist = pricelist_ids[0][0] if pricelist_ids else pricelist

            if isinstance(pricelist, (int, long)):
                products = self.browse(cr, uid, ids, context=context)
                qtys = map(lambda x: (x, quantity, partner), products)
                pl = plobj.browse(cr, uid, pricelist, context=context)
                price = plobj._price_get_multi(cr, uid, pl, qtys, context=context)
                for id in ids:
                    res[id] = price.get(id, 0.0)
        # Products the pricelist did not price default to 0.0.
        for id in ids:
            res.setdefault(id, 0.0)
        return res

    def view_header_get(self, cr, uid, view_id, view_type, context=None):
        """Prefix view headers with the category name when browsing by category."""
        if context is None:
            context = {}
        res = super(product_product, self).view_header_get(cr, uid, view_id, view_type, context)
        if (context.get('categ_id', False)):
            return _('Products: ') + self.pool.get('product.category').browse(cr, uid, context['categ_id'], context=context).name
        return res

    def _product_lst_price(self, cr, uid, ids, name, arg, context=None):
        """Function field: public (list) price converted to the context UoM
        when given, plus the variant's attribute price_extra."""
        product_uom_obj = self.pool.get('product.uom')
        # Robustness: guard against an explicit context=None before 'in' tests.
        if context is None:
            context = {}
        res = dict.fromkeys(ids, 0.0)
        for product in self.browse(cr, uid, ids, context=context):
            if 'uom' in context:
                uom = product.uos_id or product.uom_id
                res[product.id] = product_uom_obj._compute_price(cr, uid,
                        uom.id, product.list_price, context['uom'])
            else:
                res[product.id] = product.list_price
            res[product.id] = res[product.id] + product.price_extra
        return res

    def _set_product_lst_price(self, cr, uid, id, name, value, args, context=None):
        """Inverse of _product_lst_price: store list_price net of the UoM
        conversion and of the variant's price_extra."""
        product_uom_obj = self.pool.get('product.uom')
        # Robustness: guard against an explicit context=None before 'in' tests.
        if context is None:
            context = {}
        product = self.browse(cr, uid, id, context=context)
        if 'uom' in context:
            uom = product.uos_id or product.uom_id
            value = product_uom_obj._compute_price(cr, uid,
                    context['uom'], value, uom.id)
        value = value - product.price_extra
        return product.write({'list_price': value}, context=context)

    def _get_partner_code_name(self, cr, uid, ids, product, partner_id, context=None):
        """Return {'code','name'} for *product*, preferring the supplier's own
        product code/name when *partner_id* is one of its suppliers."""
        for supinfo in product.seller_ids:
            if supinfo.name.id == partner_id:
                return {'code': supinfo.product_code or product.default_code, 'name': supinfo.product_name or product.name}
        res = {'code': product.default_code, 'name': product.name}
        return res

    def _product_code(self, cr, uid, ids, name, arg, context=None):
        """Function field: internal reference, supplier-specific when the
        context carries a partner_id."""
        res = {}
        if context is None:
            context = {}
        for p in self.browse(cr, uid, ids, context=context):
            res[p.id] = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)['code']
        return res

    def _product_partner_ref(self, cr, uid, ids, name, arg, context=None):
        """Function field: "[code] name" display reference, supplier-specific
        when the context carries a partner_id."""
        res = {}
        if context is None:
            context = {}
        for p in self.browse(cr, uid, ids, context=context):
            data = self._get_partner_code_name(cr, uid, [], p, context.get('partner_id', None), context=context)
            if not data['code']:
                data['code'] = p.code
            if not data['name']:
                data['name'] = p.name
            res[p.id] = (data['code'] and ('['+data['code']+'] ') or '') + (data['name'] or '')
        return res

    def _is_product_variant_impl(self, cr, uid, ids, name, arg, context=None):
        # product.product records are always variants (templates are not).
        return dict.fromkeys(ids, True)

    def _get_name_template_ids(self, cr, uid, ids, context=None):
        # Store trigger for name_template: map changed template ids to the
        # ids of their variants so the related field is recomputed.
        result = set()
        template_ids = self.pool.get('product.product').search(cr, uid, [('product_tmpl_id', 'in', ids)])
        for el in template_ids:
            result.add(el)
        return list(result)

    def _get_image_variant(self, cr, uid, ids, name, args, context=None):
        """Function field getter: variant image, falling back to the template's."""
        result = dict.fromkeys(ids, False)
        for obj in self.browse(cr, uid, ids, context=context):
            result[obj.id] = obj.image_variant or getattr(obj.product_tmpl_id, name)
        return result

    def _set_image_variant(self, cr, uid, id, name, value, args, context=None):
        """Function field setter: store the image on the variant, but promote
        it to the template when the template has no image yet."""
        image = tools.image_resize_image_big(value)
        res = self.write(cr, uid, [id], {'image_variant': image}, context=context)
        product = self.browse(cr, uid, id, context=context)
        if not product.product_tmpl_id.image:
            product.write({'image_variant': None}, context=context)
            product.product_tmpl_id.write({'image': image}, context=context)
        return res

    def _get_price_extra(self, cr, uid, ids, name, args, context=None):
        """Function field: sum of the attribute-value price extras defined on
        this variant's own template."""
        result = dict.fromkeys(ids, False)
        for product in self.browse(cr, uid, ids, context=context):
            price_extra = 0.0
            for variant_id in product.attribute_value_ids:
                for price_id in variant_id.price_ids:
                    if price_id.product_tmpl_id.id == product.product_tmpl_id.id:
                        price_extra += price_id.price_extra
            result[product.id] = price_extra
        return result

    _columns = {
        'price': fields.function(_product_price, type='float', string='Price', digits_compute=dp.get_precision('Product Price')),
        # Typo fix in the user-visible tooltip: "le sum" -> "the sum".
        'price_extra': fields.function(_get_price_extra, type='float', string='Variant Extra Price', help="This is the sum of the extra price of all attributes"),
        'lst_price': fields.function(_product_lst_price, fnct_inv=_set_product_lst_price, type='float', string='Public Price', digits_compute=dp.get_precision('Product Price')),
        'code': fields.function(_product_code, type='char', string='Internal Reference'),
        'partner_ref' : fields.function(_product_partner_ref, type='char', string='Customer ref'),
        'default_code' : fields.char('Internal Reference', select=True),
        'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the product without removing it."),
        'product_tmpl_id': fields.many2one('product.template', 'Product Template', required=True, ondelete="cascade", select=True, auto_join=True),
        'ean13': fields.char('EAN13 Barcode', size=13, help="International Article Number used for product identification."),
        'name_template': fields.related('product_tmpl_id', 'name', string="Template Name", type='char', store={
            'product.template': (_get_name_template_ids, ['name'], 10),
            'product.product': (lambda self, cr, uid, ids, c=None: ids, [], 10),
        }, select=True),
        'attribute_value_ids': fields.many2many('product.attribute.value', id1='prod_id', id2='att_id', string='Attributes', readonly=True, ondelete='restrict'),
        # image: all image fields are base64 encoded and PIL-supported
        'image_variant': fields.binary("Variant Image",
            help="This field holds the image used as image for the product variant, limited to 1024x1024px."),
        'image': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
            string="Big-sized image", type="binary",
            help="Image of the product variant (Big-sized image of product template if false). It is automatically "\
                 "resized as a 1024x1024px image, with aspect ratio preserved."),
        'image_small': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
            string="Small-sized image", type="binary",
            help="Image of the product variant (Small-sized image of product template if false)."),
        'image_medium': fields.function(_get_image_variant, fnct_inv=_set_image_variant,
            string="Medium-sized image", type="binary",
            help="Image of the product variant (Medium-sized image of product template if false)."),
    }

    _defaults = {
        'active': 1,
        'color': 0,
    }

    def unlink(self, cr, uid, ids, context=None):
        """Delete variants; when a variant is the last of its template, delete
        the template too (after super, to avoid the cascade double-delete)."""
        unlink_ids = []
        unlink_product_tmpl_ids = []
        for product in self.browse(cr, uid, ids, context=context):
            # Check if product still exists, in case it has been unlinked by unlinking its template
            if not product.exists():
                continue
            tmpl_id = product.product_tmpl_id.id
            # Check if the product is last product of this template
            other_product_ids = self.search(cr, uid, [('product_tmpl_id', '=', tmpl_id), ('id', '!=', product.id)], context=context)
            if not other_product_ids:
                unlink_product_tmpl_ids.append(tmpl_id)
            unlink_ids.append(product.id)
        res = super(product_product, self).unlink(cr, uid, unlink_ids, context=context)
        # delete templates after calling super, as deleting template could lead to deleting
        # products due to ondelete='cascade'
        self.pool.get('product.template').unlink(cr, uid, unlink_product_tmpl_ids, context=context)
        return res

    def onchange_uom(self, cursor, user, ids, uom_id, uom_po_id):
        """On UoM change: force the purchase UoM back to the default UoM when
        their categories differ."""
        if uom_id and uom_po_id:
            uom_obj = self.pool.get('product.uom')
            uom = uom_obj.browse(cursor, user, [uom_id])[0]
            uom_po = uom_obj.browse(cursor, user, [uom_po_id])[0]
            if uom.category_id.id != uom_po.category_id.id:
                return {'value': {'uom_po_id': uom_id}}
        return False

    def _check_ean_key(self, cr, uid, ids, context=None):
        # Constraint helper: validate the EAN13 checksum of every record.
        for product in self.read(cr, uid, ids, ['ean13'], context=context):
            if not check_ean(product['ean13']):
                return False
        return True

    _constraints = [(_check_ean_key, 'You provided an invalid "EAN13 Barcode" reference. You may use the "Internal Reference" field instead.', ['ean13'])]

    def on_order(self, cr, uid, ids, orderline, quantity):
        # Hook for other modules; intentionally a no-op here.
        pass

    def name_get(self, cr, user, ids, context=None):
        """Display name "[code] name (attr1, attr2)", using the supplier's
        own code/name when the context carries a partner_id."""
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not len(ids):
            return []

        def _name_get(d):
            name = d.get('name', '')
            code = context.get('display_default_code', True) and d.get('default_code', False) or False
            if code:
                name = '[%s] %s' % (code, name)
            return (d['id'], name)

        partner_id = context.get('partner_id', False)

        # all user don't have access to seller and partner
        # check access and use superuser
        self.check_access_rights(cr, user, "read")
        self.check_access_rule(cr, user, ids, "read", context=context)

        result = []
        for product in self.browse(cr, SUPERUSER_ID, ids, context=context):
            variant = ", ".join([v.name for v in product.attribute_value_ids])
            name = variant and "%s (%s)" % (product.name, variant) or product.name
            sellers = []
            if partner_id:
                sellers = filter(lambda x: x.name.id == partner_id, product.seller_ids)
            if sellers:
                for s in sellers:
                    mydict = {
                        'id': product.id,
                        'name': s.product_name or name,
                        'default_code': s.product_code or product.default_code,
                    }
                    result.append(_name_get(mydict))
            else:
                mydict = {
                    'id': product.id,
                    'name': name,
                    'default_code': product.default_code,
                }
                result.append(_name_get(mydict))
        return result

    def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
        """Lookup order: exact default_code, exact ean13, then code/name with
        *operator*, finally a "[code]" pattern embedded in *name*."""
        if not args:
            args = []
        if name:
            ids = self.search(cr, user, [('default_code', '=', name)] + args, limit=limit, context=context)
            if not ids:
                ids = self.search(cr, user, [('ean13', '=', name)] + args, limit=limit, context=context)
            if not ids:
                # Do not merge the 2 next lines into one single search, SQL search performance would be abysmal
                # on a database with thousands of matching products, due to the huge merge+unique needed for the
                # OR operator (and given the fact that the 'name' lookup results come from the ir.translation table
                # Performing a quick memory merge of ids in Python will give much better performance
                ids = set(self.search(cr, user, args + [('default_code', operator, name)], limit=limit, context=context))
                if not limit or len(ids) < limit:
                    # we may underrun the limit because of dupes in the results, that's fine
                    limit2 = (limit - len(ids)) if limit else False
                    ids.update(self.search(cr, user, args + [('name', operator, name)], limit=limit2, context=context))
                ids = list(ids)
            if not ids:
                ptrn = re.compile('(\[(.*?)\])')
                res = ptrn.search(name)
                if res:
                    ids = self.search(cr, user, [('default_code', '=', res.group(2))] + args, limit=limit, context=context)
        else:
            ids = self.search(cr, user, args, limit=limit, context=context)
        result = self.name_get(cr, user, ids, context=context)
        return result

    #
    # Can be overridden for variant price matrices.
    #
    def price_get(self, cr, uid, ids, ptype='list_price', context=None):
        """Return {product_id: price} for *ptype*, delegating to the template."""
        products = self.browse(cr, uid, ids, context=context)
        return self.pool.get("product.template")._price_get(cr, uid, products, ptype=ptype, context=context)

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a variant.

        With ``context['variant']`` set, the copy stays attached to the same
        template; otherwise the copy's name gets a "(copy)" suffix.
        """
        if context is None:
            context = {}
        if default is None:
            # Bug fix: ``default`` was dereferenced below without a guard,
            # crashing when copy() was called without an explicit default.
            default = {}
        product = self.browse(cr, uid, id, context)
        if context.get('variant'):
            # if we copy a variant or create one, we keep the same template
            default['product_tmpl_id'] = product.product_tmpl_id.id
        elif 'name' not in default:
            default['name'] = _("%s (copy)") % (product.name,)
        return super(product_product, self).copy(cr, uid, id, default=default, context=context)

    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Restrict searches to the default category subtree when the context
        carries search_default_categ_id."""
        if context is None:
            context = {}
        if context.get('search_default_categ_id'):
            args.append((('categ_id', 'child_of', context['search_default_categ_id'])))
        return super(product_product, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)

    def open_product_template(self, cr, uid, ids, context=None):
        """ Utility method used to add an "Open Template" button in product views """
        product = self.browse(cr, uid, ids[0], context=context)
        return {'type': 'ir.actions.act_window',
                'res_model': 'product.template',
                'view_mode': 'form',
                'res_id': product.product_tmpl_id.id,
                'target': 'new'}

    def create(self, cr, uid, vals, context=None):
        """Create a variant, flagging the context so product.template.create
        knows a variant is being created explicitly."""
        if context is None:
            context = {}
        ctx = dict(context or {}, create_product_product=True)
        return super(product_product, self).create(cr, uid, vals, context=ctx)

    def need_procurement(self, cr, uid, ids, context=None):
        # Overridden by procurement-aware modules (stock, mrp, ...).
        return False
class product_packaging(osv.osv):
    """Packaging definition for a product template: how many units fit in a
    logistic unit (box/pallet), with optional EAN identification."""
    _name = "product.packaging"
    _description = "Packaging"
    _rec_name = 'ean'
    _order = 'sequence'
    _columns = {
        'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of packaging."),
        'name' : fields.text('Description'),
        'qty' : fields.float('Quantity by Package',
            help="The total number of products you can put by pallet or box."),
        'ul' : fields.many2one('product.ul', 'Package Logistic Unit', required=True),
        'ul_qty' : fields.integer('Package by layer', help='The number of packages by layer'),
        'ul_container': fields.many2one('product.ul', 'Pallet Logistic Unit'),
        'rows' : fields.integer('Number of Layers', required=True,
            help='The number of layers on a pallet or box'),
        'product_tmpl_id' : fields.many2one('product.template', 'Product', select=1, ondelete='cascade', required=True),
        'ean' : fields.char('EAN', size=14, help="The EAN code of the package unit."),
        'code' : fields.char('Code', help="The code of the transport unit."),
        'weight': fields.float('Total Package Weight',
            help='The weight of a full package, pallet or box.'),
    }

    def _check_ean_key(self, cr, uid, ids, context=None):
        # Constraint helper: every packaging EAN must pass the EAN validation.
        for pack in self.browse(cr, uid, ids, context=context):
            if not check_ean(pack.ean):
                return False
        return True

    _constraints = [(_check_ean_key, 'Error: Invalid ean code', ['ean'])]

    def name_get(self, cr, uid, ids, context=None):
        """Display name: '[EAN] <logistic unit name>'."""
        if not len(ids):
            return []
        res = []
        for pckg in self.browse(cr, uid, ids, context=context):
            p_name = pckg.ean and '[' + pckg.ean + '] ' or ''
            p_name += pckg.ul.name
            res.append((pckg.id,p_name))
        return res

    def _get_1st_ul(self, cr, uid, context=None):
        # Default logistic unit: the first one ever created (lowest id).
        cr.execute('select id from product_ul order by id asc limit 1')
        res = cr.fetchone()
        return (res and res[0]) or False

    _defaults = {
        'rows' : 3,
        'sequence' : 1,
        'ul' : _get_1st_ul,
    }

    def checksum(ean):
        """Return the check digit for *ean*: weighted digit sum with
        alternating 3/1 weights, then (10 - sum mod 10) mod 10.

        NOTE(review): weights start at 3, which presumably assumes a
        12-digit body (EAN-13 minus its check digit) -- confirm intended
        input length before reusing elsewhere.
        """
        salt = '31' * 6 + '3'
        sum = 0
        for ean_part, salt_part in zip(ean, salt):
            sum += int(ean_part) * int(salt_part)
        return (10 - (sum % 10)) % 10
    checksum = staticmethod(checksum)
class product_supplierinfo(osv.osv):
    _name = "product.supplierinfo"
    _description = "Information about a product supplier"

    def _calc_qty(self, cr, uid, ids, fields, arg, context=None):
        # Function-field getter (multi="qty"): expose min_qty as 'qty'.
        # NOTE(review): the inner loop rebuilds the per-record dict for each
        # requested field, so only the last field key survives before 'qty'
        # is written -- harmless while 'qty' is the only field in the group.
        result = {}
        for supplier_info in self.browse(cr, uid, ids, context=context):
            for field in fields:
                result[supplier_info.id] = {field:False}
            qty = supplier_info.min_qty
            result[supplier_info.id]['qty'] = qty
        return result

    _columns = {
        'name' : fields.many2one('res.partner', 'Supplier', required=True,domain = [('supplier','=',True)], ondelete='cascade', help="Supplier of this product"),
        'product_name': fields.char('Supplier Product Name', help="This supplier's product name will be used when printing a request for quotation. Keep empty to use the internal one."),
        'product_code': fields.char('Supplier Product Code', help="This supplier's product code will be used when printing a request for quotation. Keep empty to use the internal one."),
        'sequence' : fields.integer('Sequence', help="Assigns the priority to the list of product supplier."),
        'product_uom': fields.related('product_tmpl_id', 'uom_po_id', type='many2one', relation='product.uom', string="Supplier Unit of Measure", readonly="1", help="This comes from the product form."),
        'min_qty': fields.float('Minimal Quantity', required=True, help="The minimal quantity to purchase to this supplier, expressed in the supplier Product Unit of Measure if not empty, in the default unit of measure of the product otherwise."),
        'qty': fields.function(_calc_qty, store=True, type='float', string='Quantity', multi="qty", help="This is a quantity which is converted into Default Unit of Measure."),
        'product_tmpl_id' : fields.many2one('product.template', 'Product Template', required=True, ondelete='cascade', select=True, oldname='product_id'),
        'delay' : fields.integer('Delivery Lead Time', required=True, help="Lead time in days between the confirmation of the purchase order and the receipt of the products in your warehouse. Used by the scheduler for automatic computation of the purchase order planning."),
        'pricelist_ids': fields.one2many('pricelist.partnerinfo', 'suppinfo_id', 'Supplier Pricelist', copy=True),
        'company_id':fields.many2one('res.company','Company',select=1),
    }
    _defaults = {
        'min_qty': 0.0,
        'sequence': 1,
        'delay': 1,
        'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'product.supplierinfo', context=c),
    }
    # Suppliers are tried in 'sequence' (priority) order.
    _order = 'sequence'
class pricelist_partnerinfo(osv.osv):
    """Price-break line of a supplier price list (product.supplierinfo):
    unit price applicable from a minimal quantity, ordered ascending."""
    _name = 'pricelist.partnerinfo'
    _columns = {
        'name': fields.char('Description'),
        'suppinfo_id': fields.many2one('product.supplierinfo', 'Partner Information', required=True, ondelete='cascade'),
        # Typo fix in the user-visible tooltip: "otherrwise" -> "otherwise".
        'min_quantity': fields.float('Quantity', required=True, help="The minimal quantity to trigger this rule, expressed in the supplier Unit of Measure if any or in the default Unit of Measure of the product otherwise."),
        'price': fields.float('Unit Price', required=True, digits_compute=dp.get_precision('Product Price'), help="This price will be considered as a price for the supplier Unit of Measure if any or the default Unit of Measure of the product otherwise"),
    }
    _order = 'min_quantity asc'
class res_currency(osv.osv):
    _inherit = 'res.currency'

    def _check_main_currency_rounding(self, cr, uid, ids, context=None):
        # The company's main currency must not round more coarsely than the
        # 'Account' decimal precision allows (else amounts lose digits).
        cr.execute('SELECT digits FROM decimal_precision WHERE name like %s',('Account',))
        digits = cr.fetchone()
        if digits and len(digits):
            digits = digits[0]
            main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
            for currency_id in ids:
                # Only the company's main currency is constrained.
                if currency_id == main_currency.id:
                    if main_currency.rounding < 10 ** -digits:
                        return False
        return True

    _constraints = [
        (_check_main_currency_rounding, 'Error! You cannot define a rounding factor for the company\'s main currency that is smaller than the decimal precision of \'Account\'.', ['rounding']),
    ]
class decimal_precision(osv.osv):
    _inherit = 'decimal.precision'

    def _check_main_currency_rounding(self, cr, uid, ids, context=None):
        # Mirror of the res.currency constraint: the 'Account' precision may
        # not exceed what the main currency's rounding can represent.
        cr.execute('SELECT id, digits FROM decimal_precision WHERE name like %s',('Account',))
        res = cr.fetchone()
        if res and len(res):
            account_precision_id, digits = res
            main_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
            for decimal_precision in ids:
                # Only the 'Account' precision record is constrained.
                if decimal_precision == account_precision_id:
                    if main_currency.rounding < 10 ** -digits:
                        return False
        return True

    _constraints = [
        (_check_main_currency_rounding, 'Error! You cannot define the decimal precision of \'Account\' as greater than the rounding factor of the company\'s main currency', ['digits']),
    ]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
bealdav/OCB
|
addons/product/product.py
|
Python
|
agpl-3.0
| 61,896
|
import json
import os.path
import subprocess
import yaml
# NOTE(review): unused module-level list -- presumably a leftover; confirm
# nothing imports it before removing.
brokenlist = list()


class YAMLConfig(object):
    """A small YAML-file-backed key/value configuration store.

    Loads ``filename`` on construction (creating it from ``default_keys``
    when missing), back-fills missing defaults, and writes every mutation
    straight back to disk. Supports ``cfg[key]`` / ``cfg[key] = value``.
    """

    _config_values = {}

    def __init__(self, filename, default_keys=None, strict_mode=False):
        # Bug fix: ``default_keys={}`` was a shared mutable default argument;
        # a None sentinel keeps the interface backward compatible.
        self.filename = filename
        self.default_keys = {} if default_keys is None else default_keys
        self.strict_mode = strict_mode
        self._load_config()

    def _load_config(self):
        """Read the YAML file into memory, creating it first if absent."""
        if not os.path.exists(self.filename):
            self._make_default_config()
        else:
            f = open(self.filename, 'r')
            # Security/compat fix: safe_load() refuses arbitrary-object
            # construction and works on PyYAML >= 5 where plain load()
            # requires an explicit Loader. ``or {}`` guards empty files.
            self._config_values = yaml.safe_load(f) or {}
            f.close()
            self._validate_config()
        print("[Config] Config %s loaded!" % self.filename)

    def _save_config(self):
        """Write the in-memory values back to the YAML file."""
        f = open(self.filename, "w")
        yaml.dump(self._config_values, f, indent=1)
        f.close()

    def _make_default_config(self):
        """Create the config file (and its directory) from the defaults."""
        try:
            os.makedirs(os.path.dirname(self.filename))
        except Exception as e:
            # Directory may already exist; creation is best-effort.
            print("Error making folder %s because %s" % (os.path.dirname(self.filename), e))
        f = open(self.filename, "w")
        yaml.dump(self.default_keys, f, indent=1)
        f.close()
        print("[Config] Default config for %s created." % self.filename)
        self._load_config()

    def _validate_config(self):
        """Reconcile loaded values with the defaults.

        Missing keys are back-filled. In strict mode unknown keys are
        dropped; otherwise keys whose value is None are reset to default.
        """
        for key, value in self.default_keys.items():
            if key not in self._config_values:
                self._config_values[key] = value
                print("[Config] Added new default %s for config %s" % (key, self.filename))
        if self.strict_mode:
            # Bug fix: iterate a snapshot -- deleting from a dict while
            # iterating its live key view raises RuntimeError on Python 3.
            for key in list(self._config_values.keys()):
                if key not in self.default_keys:
                    del self._config_values[key]
                    # Typo fix in the log message: "invlid" -> "invalid".
                    print("[Config] Deleted invalid key %s for config %s" % (key, self.filename))
        else:
            # Bug fix: the original only examined whatever ``key`` leaked
            # out of the first loop; check every known key instead.
            for key, value in self.default_keys.items():
                if self._config_values[key] is None:
                    self._config_values[key] = value
                    print("[Config] Resetting invalid key type for %s in config %s." % (key, self.filename))
        self._save_config()

    def _get_key(self, key):
        """Return the value for *key*, raising KeyError when unknown.

        Unicode values are returned UTF-8 encoded (legacy Python 2 behavior
        relied on by callers comparing against byte strings).
        """
        if key not in self._config_values:
            raise KeyError
        if str(type(self._config_values[key])) == "<type 'unicode'>":
            return self._config_values[key].encode('utf-8')
        else:
            return self._config_values[key]

    def set_key(self, key, value):
        """Set *key* to *value* and persist immediately."""
        self._config_values[key] = value
        self._save_config()

    def key_exists(self, key):
        """Return True when *key* is present in the loaded config."""
        return key in self._config_values

    def __getitem__(self, item):
        return self._get_key(item)

    def __setitem__(self, key, value):
        self.set_key(key, value)
# In-memory list of ban records; populated by load_bans() below.
banList = []

# Global proxy configuration, backed by cfg/pso2proxy.config.yml.
# strict_mode=True prunes any key not present in the defaults on load.
globalConfig = YAMLConfig(
    "cfg/pso2proxy.config.yml",
    {
        'myIpAddr': "0.0.0.0",
        'bindIp': "0.0.0.0",
        'blockNameMode': 1,
        'noisy': False,
        'admins': [],
        'enabledShips': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        'commandPrefix': '!'
    },
    True
)
# Block-name overrides keyed by block id (filled by load_block_names()).
blockNames = {}
# Human-readable ship labels keyed by ship id (filled by load_ship_names()).
ShipLabel = {}
# Short git description of the running checkout, used for version reporting.
# NOTE(review): requires git on PATH and a repository working directory.
proxy_ver = subprocess.Popen(["git", "describe", "--always"], stdout=subprocess.PIPE).communicate()[0].rstrip("\n")
def is_admin(sega_id):
    """Return True when *sega_id* appears in the configured admin list."""
    return sega_id in globalConfig['admins']
def load_block_names():
    """(Re)load block-name overrides from cfg/blocknames.resources.json.

    Leaves ``blockNames`` untouched when renaming is disabled or the file
    is absent/corrupt. Returns a human-readable status string.
    """
    global blockNames
    if globalConfig['blockNameMode'] == 0:
        return "[ShipProxy] Blocks are not renamed"
    if os.path.exists("cfg/blocknames.resources.json"):
        f = open("cfg/blocknames.resources.json", 'r')
        try:
            # NOTE(review): json.load(encoding=...) is a Python 2-only kwarg;
            # it was removed from Python 3 -- confirm interpreter target.
            blockNames = json.load(f, encoding='utf-8')
            f.close()
            return ("[ShipProxy] %s Block names loaded!" % len(blockNames))
        except ValueError:
            # Malformed JSON: keep any previously loaded names.
            f.close()
            return ("[ShipProxy] Failed to load blockname file")
    else:
        return "[ShipProxy] BlockName file does not exists"

# Load once at import time.
load_block_names()
def load_ship_names():
    """(Re)load ship labels from cfg/shipslabel.resources.json into ShipLabel.

    Always (re)seeds the special "Console" entry; values are stored UTF-8
    encoded. Returns a human-readable status string.
    """
    global ShipLabel
    ShipLabel.clear() # Clear list
    ShipLabel["Console"] = "Console"
    if os.path.exists("cfg/shipslabel.resources.json"):
        f = open("cfg/shipslabel.resources.json", 'r')
        try:
            # NOTE(review): json.load(encoding=...) is a Python 2-only kwarg.
            for key, val in json.load(f, encoding='utf-8').items():
                ShipLabel[key] = val.encode("utf8", 'ignore')
            f.close()
            return ("[GlobalChat] %s ship labels names loaded!" % len(ShipLabel))
        except ValueError:
            # Malformed JSON: keep the seeded "Console" entry only.
            f.close()
            return ("[GlobalChat] Failed to load ship labels!")
    else:
        return "[GlobalChat] shipslabel file does not exists"

# Load once at import time.
load_ship_names()
def load_bans():
    """Load the ban list from cfg/pso2proxy.bans.json into ``banList``.

    Creates the file from the current (usually empty) in-memory list when
    it does not exist yet. Returns a human-readable status string.
    """
    global banList
    if not os.path.exists('cfg/pso2proxy.bans.json'):
        # First run: persist the in-memory list so the file exists.
        f = open('cfg/pso2proxy.bans.json', 'w')
        f.write(json.dumps(banList))
        f.close()
    f = open('cfg/pso2proxy.bans.json', 'r')
    bans = f.read()
    f.close()
    banList = json.loads(bans)
    # Bug fix: report the number of ban entries, not the character count of
    # the raw JSON text that was read from disk (len(bans)).
    return ("[Bans] %i bans loaded!" % len(banList))
def save_bans():
    """Persist the in-memory ban list to cfg/pso2proxy.bans.json."""
    global banList
    serialized = json.dumps(banList)
    out = open('cfg/pso2proxy.bans.json', 'w')
    out.write(serialized)
    out.close()
    print("[Bans] %i bans saved!" % len(banList))
def is_segaid_banned(segaid):
    """Return True when any ban record carries a matching 'segaId'."""
    global banList
    return any('segaId' in ban and ban['segaId'] == segaid for ban in banList)
def is_player_id_banned(player_id):
    """Return True when any ban record carries a matching 'playerId'.

    Stored ids may be strings, hence the int() coercion before comparing.
    """
    global banList
    return any('playerId' in ban and int(ban['playerId']) == player_id for ban in banList)
# Populate the ban list at import time.
load_bans()

# Convenience module-level aliases for frequently used config values.
myIpAddress = globalConfig['myIpAddr']
bindIp = globalConfig['bindIp']
blockNameMode = globalConfig['blockNameMode']
noisy = globalConfig['noisy']
admins = globalConfig['admins']
|
alama/PSO2Proxy
|
proxy/config.py
|
Python
|
agpl-3.0
| 5,799
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``drop_existing_session`` boolean (default False) to each of the
    three third-party-auth provider config models: LTI, OAuth2 and SAML."""

    dependencies = [
        ('third_party_auth', '0007_auto_20170406_0912'),
    ]

    operations = [
        # Identical flag added to each provider model; when set, a tpa_hint
        # request matching the provider drops any existing session first.
        migrations.AddField(
            model_name='ltiproviderconfig',
            name='drop_existing_session',
            field=models.BooleanField(default=False, help_text='Whether to drop an existing session when accessing a view decorated with third_party_auth.decorators.tpa_hint_ends_existing_session when a tpa_hint URL query parameter mapping to this provider is included in the request.'),
        ),
        migrations.AddField(
            model_name='oauth2providerconfig',
            name='drop_existing_session',
            field=models.BooleanField(default=False, help_text='Whether to drop an existing session when accessing a view decorated with third_party_auth.decorators.tpa_hint_ends_existing_session when a tpa_hint URL query parameter mapping to this provider is included in the request.'),
        ),
        migrations.AddField(
            model_name='samlproviderconfig',
            name='drop_existing_session',
            field=models.BooleanField(default=False, help_text='Whether to drop an existing session when accessing a view decorated with third_party_auth.decorators.tpa_hint_ends_existing_session when a tpa_hint URL query parameter mapping to this provider is included in the request.'),
        ),
    ]
|
eduNEXT/edunext-platform
|
common/djangoapps/third_party_auth/migrations/0008_auto_20170413_1455.py
|
Python
|
agpl-3.0
| 1,439
|
#! /usr/bin/python2.3
import re
import xml.sax
import sys
import string
import os
from resolvemembernames import memberList
######################################################################
# Read wrans count
class WransCount(xml.sax.handler.ContentHandler):
    """SAX handler that tallies written-answer questions per MP.

    After parsing, ``count`` maps a bare member id (the trailing part of
    ``uk.org.publicwhip/member/<id>``) to the number of ``ques`` speeches
    seen for that member.
    """
    def __init__(self):
        self.count = {}

    def startElement(self, name, attr):
        """Invoked by the SAX parser for each opening XML element."""
        if name != "speech" or attr["type"] != "ques":
            return
        member = re.sub("uk.org.publicwhip/member/", "", attr["speakerid"])
        self.count[member] = self.count.get(member, 0) + 1
# Parse every scraped written-answers XML file, feeding the counter above.
wranscount = WransCount()
parser = xml.sax.make_parser()
parser.setContentHandler(wranscount)
# NOTE(review): hard-coded absolute data directory.
dir = "/home/francis/pwdata/pwscrapedxml/wrans/"
fdirin = os.listdir(dir)
for fin in fdirin:
    # Progress output (Python 2 print-to-stderr syntax).
    print >>sys.stderr, fin
    parser.parse(dir + fin)
######################################################################
# Read rebellions
# rebellions.txt holds tab-separated "<mp>\t<rebellion-count>" lines;
# build rs: MP name -> rebellion count (kept as a string).
f = open("rebellions.txt")
rb = f.read()
f.close()
rbl = re.findall("(.*?)\n", rb)
rs = {}
for x in rbl:
    (mp, r) = re.split("\t", x)
    rs[mp] = r
######################################################################
# Read majorities
# Scrape the 2001 majorities HTML table and print "majority, wrans-count"
# pairs for every MP whose constituency name can be matched.
s = "../rawdata/majorities2001.html"
f = open(s, "r")
tx = f.read()
f.close()
# Split out the tables; the MP table is the capture at index 3.
sl = re.split("<table[^>]*>([\s\S]*?)</table>(?i)", tx)
mpt = sl[3]
# Each row yields (mp, constituency, party?, majority) -- see the groups.
pr = re.findall("<tr><td><a[^>]*>(.*?)</a></td><td>(?:<a[^>]*>)?([^<]*?)(?:</a>)?</td><td>(?:<font[^>]*>)?(.*?)(?:</font>)?</td><td[^>]*>(?:<font[^>]*>)?(.*?)(?:</font>)?</td>.*?</tr>(?i)", mpt)
for jp in pr:
    # Majority arrives with thousands separators, e.g. "12,345".
    maj = string.atoi(string.replace(jp[3], ",", ""))
    # Match by constituency name as of the 2001-06-07 election.
    id = memberList.matchfulldivisionname(jp[1], '2001-06-07')
    if id[0] <> "unknown":
        thisid = re.sub("uk.org.publicwhip/member/", "", id[0])
        print "%s, %s" % (maj, wranscount.count.setdefault(thisid, 0))
        #print jp
        # rs[thisid],
|
openaustralia/publicwhip-matthew
|
custom/majority/majex.py
|
Python
|
agpl-3.0
| 1,918
|
"""
Provides partition support to the user service.
"""
import logging
import random
from eventtracking import tracker
import openedx.core.djangoapps.user_api.course_tag.api as course_tag_api
from xmodule.partitions.partitions import NoSuchUserPartitionGroupError, UserPartitionError
log = logging.getLogger(__name__)
class NotImplementedPartitionScheme(object):
    """Placeholder scheme used to retire previously-defined schemes.

    Course data that still references a removed scheme keeps a safe entry
    point to load through; lookups simply resolve to "no group".
    """

    @classmethod
    def get_group_for_user(cls, course_key, user, user_partition, assign=True):  # pylint: disable=unused-argument
        """Report that the user belongs to no group under this partition.

        Returning None is equivalent to saying "this user is not in any
        groups using this partition scheme" -- schemes removed this way
        must be compatible with that assumption.
        """
        return None
class ReturnGroup1PartitionScheme(object):
    """Scheme that pins every user to the partition's group 1.

    Needed so verification partitions could be killed off; see EDUCATOR-199.
    """

    @classmethod
    def get_group_for_user(cls, course_key, user, user_partition, assign=True):  # pylint: disable=unused-argument
        """Resolve to the partition's historical "allow" group.

        The previous "allow" definition for verification was group id 1,
        so that group is always returned.
        Details at https://github.com/edx/edx-platform/pull/14913/files#diff-feff1466ec4d1b8c38894310d8342a80
        """
        allow_group_id = 1
        return user_partition.get_group(allow_group_id)
class RandomUserPartitionScheme(object):
    """
    This scheme randomly assigns users into the partition's groups.
    """
    RANDOM = random.Random()

    @classmethod
    def get_group_for_user(cls, course_key, user, user_partition, assign=True):
        """
        Returns the group from the specified user position to which the user is assigned.
        If the user has not yet been assigned, a group will be randomly chosen for them if assign flag is True.
        """
        # The assignment is persisted as a per-user course tag keyed on the
        # partition id, so repeat calls return the same group.
        partition_key = cls.key_for_partition(user_partition)
        group_id = course_tag_api.get_course_tag(user, course_key, partition_key)

        group = None
        if group_id is not None:
            # attempt to look up the presently assigned group
            try:
                group = user_partition.get_group(int(group_id))
            except NoSuchUserPartitionGroupError:
                # jsa: we can turn off warnings here if this is an expected case.
                log.warning(
                    u"group not found in RandomUserPartitionScheme: %r",
                    {
                        "requested_partition_id": user_partition.id,
                        "requested_group_id": group_id,
                    },
                    exc_info=True
                )
            except ValueError:
                log.error(u"Bad group_id %r for user: %r", group_id, user)

        # No (valid) stored assignment: pick one now, unless assignment is
        # disabled or tags were bulk-prefetched (read-only fast path).
        if group is None and assign and not course_tag_api.BulkCourseTags.is_prefetched(course_key):
            if not user_partition.groups:
                raise UserPartitionError('Cannot assign user to an empty user partition')

            # pylint: disable=fixme
            # TODO: had a discussion in arch council about making randomization more
            # deterministic (e.g. some hash). Could do that, but need to be careful not
            # to introduce correlation between users or bias in generation.
            group = cls.RANDOM.choice(user_partition.groups)

            # persist the value as a course tag
            course_tag_api.set_course_tag(user, course_key, partition_key, group.id)

            # emit event for analytics
            # FYI - context is always user ID that is logged in, NOT the user id that is
            # being operated on. If instructor can move user explicitly, then we should
            # put in event_info the user id that is being operated on.
            event_name = 'xmodule.partitions.assigned_user_to_partition'
            event_info = {
                'group_id': group.id,
                'group_name': group.name,
                'partition_id': user_partition.id,
                'partition_name': user_partition.name
            }
            # pylint: disable=fixme
            # TODO: Use the XBlock publish api instead
            with tracker.get_tracker().context(event_name, {}):
                tracker.emit(
                    event_name,
                    event_info,
                )
        return group

    @classmethod
    def key_for_partition(cls, user_partition):
        """
        Returns the key to use to look up and save the user's group for a given user partition.
        """
        return 'xblock.partition_service.partition_{0}'.format(user_partition.id)
|
stvstnfrd/edx-platform
|
openedx/core/djangoapps/user_api/partition_schemes.py
|
Python
|
agpl-3.0
| 4,698
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyNvidiaMlPy3(PythonPackage):
    """Python Bindings for the NVIDIA Management Library."""
    # Spack package recipe metadata: project page and sdist download URL.
    homepage = "http://www.nvidia.com/"
    url = "https://pypi.io/packages/source/n/nvidia-ml-py3/nvidia-ml-py3-7.352.0.tar.gz"
    # Single pinned release; the sha256 lets the build system verify the download.
    version('7.352.0', sha256='390f02919ee9d73fe63a98c73101061a6b37fa694a793abf56673320f1f51277')
|
iulian787/spack
|
var/spack/repos/builtin/packages/py-nvidia-ml-py3/package.py
|
Python
|
lgpl-2.1
| 530
|
##! /usr/bin/env python
# _*_ coding: latin-1 _*_
import os
import jtutil
import jtdom
from jtelem import jtelem
class new(jtelem):
    """Combo-box UI element.

    Keeps an ordered list of choice strings plus a 1-based index of the
    currently selected item (0 means "no selection").  State is exchanged
    with the terminal as XML through the jtelem protocol.
    """

    def __init__(self, top=None, left=None, bottom=None, right=None):
        jtelem.__init__(self, top, left, bottom, right)
        self.choicelist = []
        self.selectedindex = 1

    def classname(self):
        """Element type name used in the XML protocol."""
        return "jtcombo"

    def xmladd(self):
        """Serialize the full choice list and the current selection to XML."""
        x = jtutil.EOL
        for c in self.choicelist:
            x += "<item>" + jtutil.cdataif(c) + "</item>" + jtutil.EOL
        x += "<selectedindex>" + str(self.selectedindex) + "</selectedindex>" + jtutil.EOL
        return x

    def xmlput(self, x):
        """Read state back from the terminal (only the selected index)."""
        self.selectedindex = int(jtdom.domtext(x))

    def xmlget(self):
        return str(self.selectedindex)

    def changed(self):
        """True if the selection differs from the last saved state."""
        return self.laststate != self.selectedindex

    def savestate(self):
        self.laststate = self.selectedindex

    def varput(self, x):
        self.selectedindex = x
        return self.selectedindex

    def varget(self):
        return self.selectedindex

    def additem(self, item):
        self.choicelist.append(item)
        return item

    def insertitem(self, item, idx):
        # idx is 1-based, like selectedindex.
        self.choicelist.insert(idx - 1, item)

    def deleteitem(self, idx):
        del self.choicelist[idx - 1]

    def select(self, x):
        self.selectedindex = x

    def getitem(self, x):
        return self.choicelist[x - 1]

    def selecteditem(self):
        """Return the currently selected choice, or None if the index is
        out of range (e.g. 0 = no selection)."""
        sx = self.selectedindex
        lx = len(self.choicelist)
        if 0 < sx and sx <= lx:
            return self.choicelist[sx - 1]
        return None

    def selectitem(self, txt):
        """Select by value; fall back to "no selection" (0) when absent."""
        try:
            self.select(1 + self.choicelist.index(txt))
        except ValueError:  # bugfix: was a bare except; index() only raises ValueError
            self.select(0)
        return self.selectedindex

    def changelist(self, x=None, mode=None, i=None):
        _jtcombo_changelist(self, x, mode, i)

    def changedelete(self, x):
        _jtcombo_changelist(self, x, "del")

    def changeappend(self, x):
        _jtcombo_changelist(self, x, "app")

    def changeinsert(self, x, i):
        _jtcombo_changelist(self, x, "ins", i)
def _jtcombo_changelist(self, v=None, mode=None, i=None):
    """Apply a choicelist change locally, then forward it to the terminal.

    jtcombo:changelist()          resend the (whole) choicelist
    jtcombo:changelist(v)         replace the choicelist, and send it
    jtcombo:changelist(v,"app")   extend the choicelist, send the extension
    jtcombo:changelist(v,"ins",i) insert at i, send the insertion
    jtcombo:changelist(v,"del",i) delete v/i, send the deleted index
    """
    # First apply the change locally ...
    if not mode:
        if not v:
            v = self.choicelist  # resend the current choicelist
        else:
            self.choicelist = v  # replace the choicelist, and send it
    elif mode == "app":
        if type(v) != type([]):
            v = [v]
        self.choicelist += v  # extend the choicelist, send the extension
    elif mode == "ins":
        self.insertitem(v, i)  # insert, and send the insertion
    elif mode == "del":
        if type(v) == type(0):
            i = v
        elif type(v) == type(""):
            try:
                i = 1 + self.choicelist.index(v)
            except ValueError:  # bugfix: was a bare except; index() only raises ValueError
                i = 0
        else:
            pass
            # keep the original i parameter
        if i < 1 or len(self.choicelist) < 1:
            return
        self.deleteitem(i)  # delete i, and send the deleted index
    else:
        # bugfix: was the Python-2-only "raise E, args" statement; the call
        # form below is equivalent and works on both Python 2 and 3.
        raise jtutil.applicationerror("jtcombo", "invalid changelist mode", mode)
    # ... then send it to the terminal.
    x = '<jtmessage'
    x += jtutil.ATTR("pid", str(os.getpid()))
    x += jtutil.ATTR("dialogid", self.dialogid)
    x += '>'
    x += "<control>" + self.name + "</control>"
    x += "<changelist>"
    if not mode:
        x += "<removeall/>" + jtutil.EOL
        for c in v:
            x += "<app>" + jtutil.cdataif(c) + "</app>" + jtutil.EOL
    elif mode == "app":
        for c in v:
            x += "<app>" + jtutil.cdataif(c) + "</app>" + jtutil.EOL
    elif mode == "ins":
        x += "<ins>"
        x += "<at>" + str(i - 1) + "</at>" + v
        x += "</ins>"
    elif mode == "del":
        x += "<del>" + str(i - 1) + "</del>"
    x += "</changelist>"
    x += "</jtmessage>"
    self.send(x)
|
mrev11/ccc3
|
jt/jtpython/jtlib/jtcombo.py
|
Python
|
lgpl-2.1
| 4,135
|
# Requires the following packages
# math/py-matplotlib
# math/py-numpy
#
# Reads whitespace-separated "x y" pairs from stdin and shows a scatter
# plot of the samples.
import matplotlib.pyplot as plt
import numpy as np
import sys
import re

# --- plot configuration ------------------------------------------------------
title = 'Input'
ylabel = 'y'
xlabel = 'x'
legend = 'upper left'
#linestyles = ['x', '--', ':', 'o', 'v', 's', '+', '1', '2', '3', '4' ]
linestyles = ['x-', '^-', 'o-', 's-', 'v-', '2-', '3-', '4-']
#for more linestyles see "http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot"

# --- read the (x, y) samples from stdin --------------------------------------
x = []
y = []
for line in sys.stdin:
    # split() already discards '\n' and '\t', so no pre-cleaning is needed.
    tup = line.split()
    # Robustness fix: skip blank or too-short lines instead of crashing
    # with an IndexError on the first bad record.
    if len(tup) < 2:
        continue
    x.append(float(tup[0]))
    y.append(float(tup[1]))

if not x:
    # Robustness fix: fail with a clear message instead of an IndexError
    # when computing the axis limits below.
    sys.exit("plot-input: no (x, y) data points found on stdin")

# --- graphics ----------------------------------------------------------------
plt.title(title)
if xlabel:
    plt.xlabel(xlabel, fontsize=18)
plt.ylabel(ylabel, fontsize=18)
#plt.ylim([0, best_result * 1.1])
leg = []
lab = []
# Pad the x axis by 5% of the data span on both sides.
xspan = x[-1] - x[0]
LEFT_OFFSET = 0.05*xspan
plt.xlim([x[0]-LEFT_OFFSET, x[-1]+LEFT_OFFSET])
# make plots in blue or black ?
#style = '.-'
style = '.'
pl = plt.plot(x, y, style, linewidth=2,
              markersize=2, antialiased=True)
leg.append(pl[0])
lab.append('independent variable')
plt.legend(leg, lab, legend, prop={'size': 16})
plt.show() #debug
#outfn = sys.argv[1].rsplit('.',1)[0]
#plt.savefig(outfn + '.eps')
|
vmaffione/rlite
|
scripts/plot-input.py
|
Python
|
lgpl-2.1
| 1,222
|
# Copyright 2014-2015, Tresys Technology, LLC
#
# This file is part of SETools.
#
# SETools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1 of
# the License, or (at your option) any later version.
#
# SETools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SETools. If not, see
# <http://www.gnu.org/licenses/>.
#
from . import exception
from . import qpol
from . import symbol
from . import user
from . import role
from . import typeattr
from . import mls
def context_factory(policy, name):
    """Wrap a low-level qpol context object in a Context instance."""
    if isinstance(name, qpol.qpol_context_t):
        return Context(policy, name)
    # Contexts only exist as attributes of other objects; they cannot be
    # retrieved by name.
    raise TypeError("Contexts cannot be looked-up.")
class Context(symbol.PolicySymbol):

    """A SELinux security context/security attribute."""

    def __str__(self):
        try:
            return "{0}:{1}:{2}:{3}".format(
                self.user, self.role, self.type_, self.range_)
        except exception.MLSDisabled:
            # MLS-disabled policy: render without the range component.
            return "{0}:{1}:{2}".format(self.user, self.role, self.type_)

    @property
    def user(self):
        """The user portion of the context."""
        raw_user = self.qpol_symbol.user(self.policy)
        return user.user_factory(self.policy, raw_user)

    @property
    def role(self):
        """The role portion of the context."""
        raw_role = self.qpol_symbol.role(self.policy)
        return role.role_factory(self.policy, raw_role)

    @property
    def type_(self):
        """The type portion of the context."""
        raw_type = self.qpol_symbol.type_(self.policy)
        return typeattr.type_factory(self.policy, raw_type)

    @property
    def range_(self):
        """The MLS range of the context."""
        raw_range = self.qpol_symbol.range(self.policy)
        return mls.range_factory(self.policy, raw_range)

    def statement(self):
        # A context is not a policy statement of its own.
        raise exception.NoStatement
|
TresysTechnology/setools
|
setools/policyrep/context.py
|
Python
|
lgpl-2.1
| 2,137
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# pylint: disable=W0403
# Copyright (c) 2014-2015, Human Brain Project
# Cyrille Favreau <cyrille.favreau@epfl.ch>
#
# This file is part of RenderingResourceManager
# <https://github.com/BlueBrain/RenderingResourceManager>
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License version 3.0 as published
# by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# All rights reserved. Do not distribute without further notice.
"""
This class is in charge of handling rendering resources config, such as the
application name, executable name and command line parameters,
and ensures persistent storage in a database
"""
from rendering_resource_manager_service.config.models import RenderingResourceSettings
import rendering_resource_manager_service.utils.custom_logging as log
import rest_framework.status as http_status
from django.db import IntegrityError, transaction
from rest_framework.renderers import JSONRenderer
import json
class RenderingResourceSettingsManager(object):
    """
    This class is in charge of handling rendering resource settings and
    ensures persistent storage in a database.
    """
    @classmethod
    def create(cls, params):
        """
        Creates a new rendering resource config.

        :param params: dict of settings for the new rendering resource
        :return: [HTTP status, JSON response body]
        """
        try:
            # Settings ids are case-insensitive: always stored lower-cased.
            settings_id = params['id'].lower()
            settings = RenderingResourceSettings(
                id=settings_id,
                command_line=str(params['command_line']),
                environment_variables=str(params['environment_variables']),
                modules=str(params['modules']),
                process_rest_parameters_format=str(params['process_rest_parameters_format']),
                scheduler_rest_parameters_format=str(params['scheduler_rest_parameters_format']),
                project=str(params['project']),
                queue=str(params['queue']),
                exclusive=params['exclusive'],
                nb_nodes=params['nb_nodes'],
                nb_cpus=params['nb_cpus'],
                nb_gpus=params['nb_gpus'],
                memory=params['memory'],
                graceful_exit=params['graceful_exit'],
                wait_until_running=params['wait_until_running'],
                name=params['name'],
                description=params['description']
            )
            with transaction.atomic():
                # force_insert makes a duplicate id raise IntegrityError
                # instead of silently updating the existing row.
                settings.save(force_insert=True)
            msg = 'Rendering Resource ' + settings_id + ' successfully configured'
            response = json.dumps({'contents': msg})
            return [http_status.HTTP_201_CREATED, response]
        except IntegrityError as e:
            log.error(str(e))
            response = json.dumps({'contents': str(e)})
            return [http_status.HTTP_409_CONFLICT, response]
    @classmethod
    def update(cls, params):
        """
        Updates some given rendering resource config.

        :param params: dict with the new config for the rendering resource
        :return: [HTTP status, response body]
        """
        try:
            settings_id = params['id'].lower()
            settings = RenderingResourceSettings.objects.get(id=settings_id)
            settings.command_line = params['command_line']
            settings.environment_variables = params['environment_variables']
            settings.modules = params['modules']
            settings.process_rest_parameters_format = params['process_rest_parameters_format']
            settings.scheduler_rest_parameters_format = params['scheduler_rest_parameters_format']
            settings.project = params['project']
            settings.queue = params['queue']
            settings.exclusive = params['exclusive']
            settings.nb_nodes = params['nb_nodes']
            settings.nb_cpus = params['nb_cpus']
            settings.nb_gpus = params['nb_gpus']
            settings.memory = params['memory']
            settings.graceful_exit = params['graceful_exit']
            settings.wait_until_running = params['wait_until_running']
            settings.name = params['name']
            settings.description = params['description']
            with transaction.atomic():
                settings.save()
            return [http_status.HTTP_200_OK, '']
        except RenderingResourceSettings.DoesNotExist as e:
            log.error(str(e))
            return [http_status.HTTP_404_NOT_FOUND, str(e)]
    @classmethod
    def list(cls, serializer):
        """
        Returns a JSON formatted list of all rendering resource configs
        according to a given serializer.

        :param serializer: serializer used for formatting the settings list
        :return: [HTTP status, JSON response body]
        """
        settings = RenderingResourceSettings.objects.all()
        return [http_status.HTTP_200_OK,
                JSONRenderer().render(serializer(settings, many=True).data)]
    @staticmethod
    def get_by_id(settings_id):
        """
        Returns the rendering resource config with the given id.

        :param settings_id: id of the rendering resource whose config is wanted
        :raises RenderingResourceSettings.DoesNotExist: if the id is unknown
        """
        return RenderingResourceSettings.objects.get(id=settings_id)
    @classmethod
    def delete(cls, settings_id):
        """
        Removes some given rendering resource config.

        :param settings_id: identifier of the rendering resource config to remove
        :return: [HTTP status, response message]
        """
        try:
            settings = RenderingResourceSettings.objects.get(id=settings_id)
            with transaction.atomic():
                settings.delete()
            return [http_status.HTTP_200_OK, 'Settings successfully deleted']
        except RenderingResourceSettings.DoesNotExist as e:
            log.error(str(e))
            return [http_status.HTTP_404_NOT_FOUND, str(e)]
    @staticmethod
    def format_rest_parameters(string_format, hostname, port, schema, job_id):
        """
        Returns a string of rest parameters formatted according to the
        string_format argument.

        :param string_format: rest parameter format; may contain the
            placeholders ${job_id}, ${rest_hostname}, ${rest_port} and
            ${rest_schema}
        :param hostname: rest hostname, substituted for ${rest_hostname}
        :param port: rest port, substituted for ${rest_port}
        :param schema: rest schema, substituted for ${rest_schema}
        :param job_id: job identifier, substituted for ${job_id}
        """
        response = string_format
        response = response.replace('${job_id}', str(job_id))
        response = response.replace('${rest_hostname}', str(hostname))
        response = response.replace('${rest_port}', str(port))
        response = response.replace('${rest_schema}', str(schema))
        return response
    @classmethod
    def clear(cls):
        """
        Removes all rendering resource configs.
        """
        with transaction.atomic():
            RenderingResourceSettings.objects.all().delete()
        return [http_status.HTTP_200_OK, 'Settings cleared']
|
favreau/RenderingResourceManager
|
rendering_resource_manager_service/config/management/rendering_resource_settings_manager.py
|
Python
|
lgpl-3.0
| 7,222
|
# Path-planning script: plans a root (trunk) path for the "robot_test"
# model over the "ground_jump_easy" environment and replays the result in
# the gepetto viewer.
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.gepetto import Viewer
# RGBA colors used when displaying the roadmap and paths.
white=[1.0,1.0,1.0,1.0]
green=[0.23,0.75,0.2,0.5]
yellow=[0.85,0.75,0.15,1]
pink=[1,0.6,1,1]
orange=[1,0.42,0,1]
brown=[0.85,0.75,0.15,0.5]
blue = [0.0, 0.0, 0.8, 1.0]
grey = [0.7,0.7,0.7,1.0]
red = [0.8,0.0,0.0,1.0]
# Robot model: trunk URDF plus one "rom" URDF per leg (presumably the legs'
# range-of-motion volumes used by the rbprm reachability filter).
rootJointType = 'freeflyer'
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
urdfName = 'robot_test_trunk'
urdfNameRom = ['robot_test_lleg_rom','robot_test_rleg_rom']
urdfSuffix = ""
srdfSuffix = ""
rbprmBuilder = Builder ()
rbprmBuilder.loadModel(urdfName, urdfNameRom, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
# Bounds for the root translation (x, y, z) and orientation (SO3).
rbprmBuilder.setJointBounds ("base_joint_xyz", [-6,6, -3, 3, 0, 1.5])
rbprmBuilder.boundSO3([-0.1,0.1,-3,3,-1.0,1.0])
# Filter configurations through both leg roms; contact surfaces must have a
# normal close to +z (within 0.5).
rbprmBuilder.setFilter(['robot_test_lleg_rom', 'robot_test_rleg_rom'])
rbprmBuilder.setNormalFilter('robot_test_lleg_rom', [0,0,1], 0.5)
rbprmBuilder.setNormalFilter('robot_test_rleg_rom', [0,0,1], 0.5)
#~ from hpp.corbaserver.rbprm. import ProblemSolver
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
ps = ProblemSolver( rbprmBuilder )
r = Viewer (ps)
r.loadObstacleModel (packageName, "ground_jump_easy", "planning")
# Start and goal configurations: only the root position [x, y, z] is set.
q_init = rbprmBuilder.getCurrentConfig ();
q_init [0:3] = [-4, 1, 0.9]; rbprmBuilder.setCurrentConfig (q_init); r (q_init)
q_goal = q_init [::]
#q_goal [0:3] = [-2, 0, 0.9]; r (q_goal) # first footbridge
q_goal [0:3] = [4, -1, 0.9]; r (q_goal) # bridge
#~ ps.addPathOptimizer("GradientBased")
ps.addPathOptimizer("RandomShortcut")
#ps.client.problem.selectSteeringMethod("SteeringParabola")
#ps.selectPathPlanner("RRTdynamic")
ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
ps.client.problem.selectConFigurationShooter("RbprmShooter")
ps.client.problem.selectPathValidation("RbprmPathValidation",0.05)
r(q_init)
# (nameRoadmap,numberIt,colorNode,radiusSphere,sizeAxis,colorEdge
# Solve while displaying the growing roadmap in the viewer.
r.solveAndDisplay("rm",10,white,0.02,1,brown)
#t = ps.solve ()
#r.displayRoadmap("rm",0.005)
#r.displayPathMap("rmPath",0,0.02)
# Replay path 0 and path 1 (path 1 is presumably the optimized one — verify).
from hpp.gepetto import PathPlayer
pp = PathPlayer (rbprmBuilder.client.basic, r)
pp(0)
#pp.displayPath(1,blue)
#r.client.gui.setVisibility("path_0_root","ALWAYS_ON_TOP")
pp (1)
#r.client.gui.removeFromGroup("rm",r.sceneName)
#r.client.gui.removeFromGroup("rmPath",r.sceneName)
#r.client.gui.removeFromGroup("path_1_root",r.sceneName)
#~ pp.toFile(1, "/home/stonneau/dev/hpp/src/hpp-rbprm-corba/script/paths/stair.path")
|
mylene-campana/hpp-rbprm-corba
|
script/tests/robot_bigStep_STEVE_path.py
|
Python
|
lgpl-3.0
| 2,481
|
from os import path
from nose import tools
from tests.functional import single_machine_test
from nixops import backends
parent_dir = path.dirname(__file__)
has_hello_spec = '%s/single_machine_has_hello.nix' % (parent_dir)
rollback_spec = '%s/single_machine_rollback.nix' % (parent_dir)
class TestRollbackRollsback(single_machine_test.SingleMachineTest):
    """After deploying the hello package, rolling back to generation 1
    must remove it from the machine again."""
    _multiprocess_can_split_ = True

    def setup(self):
        super(TestRollbackRollsback, self).setup()
        self.depl.nix_exprs = self.depl.nix_exprs + [ rollback_spec ]

    def _check_no_hello(self):
        """Assert that running `hello` on the machine fails."""
        with tools.assert_raises(backends.SSHCommandFailed):
            self.check_command("hello")

    def run_check(self):
        self.depl.deploy()
        self._check_no_hello()
        self.depl.nix_exprs = self.depl.nix_exprs + [ has_hello_spec ]
        self.depl.deploy()
        self.check_command("hello")
        self.depl.rollback(generation=1)
        self._check_no_hello()
|
garbas/nixops
|
tests/functional/test_rollback_rollsback.py
|
Python
|
lgpl-3.0
| 968
|
import logging
import libsbml
from odehandling.odewrapper import ODEWrapper
# 18.07.12 td: some idiotic type mismatches for AST identifiers in different libsbml versions
# (some libsbml builds expose the AST_* constants as one-character strings
# instead of ints; normalize them to their ordinal so that integer
# comparisons against them work with every build)
if not type(libsbml.AST_PLUS) == type(1):
    libsbml.AST_PLUS = ord(libsbml.AST_PLUS)
if not type(libsbml.AST_MINUS) == type(1):
    libsbml.AST_MINUS = ord(libsbml.AST_MINUS)
if not type(libsbml.AST_TIMES) == type(1):
    libsbml.AST_TIMES = ord(libsbml.AST_TIMES)
if not type(libsbml.AST_DIVIDE) == type(1):
    libsbml.AST_DIVIDE = ord(libsbml.AST_DIVIDE)
if not type(libsbml.AST_POWER) == type(1):
    libsbml.AST_POWER = ord(libsbml.AST_POWER)
class ODEGenerator(object):
"""
This class takes a given SBML model (wrapped into a SBMLMainModel)
and computes ODEs out of all the given SBML Reactions.
It is necessary for integrating models that use Reactions (and are not
only built by using Rules).
The complete set of generated ODEs and of (Rate)Rules can then be given
to the integrator.
@param mainModel: A complete SBML MainModel
@type mainModel: sbml_model.sbml_mainmodel.SBMLMainModel
@since: 2010-07-07
"""
__author__ = "Moritz Wade"
__contact__ = "wade@zib.de"
__copyright__ = "Zuse Institute Berlin 2010"
    def __init__(self, mainModel):
        """
        Setting up instance variables and invoking the ODE generation.

        :param mainModel: the wrapped SBML model the ODEs are derived from
        """
        self.mainModel = mainModel
        self.ODEs = None        # raw libsbml ASTNode ODEs (filled by _generateODEs)
        self.speciesDAE = None  # species without a defining ODE (DAE candidates)
        self._generateODEs()
    def _generateODEs(self):
        """
        The starting point for the ODE generation algorithm.

        Builds one ODE per ODE-defining species from its reactions, then
        consumes algebraic rules, pairing each rule with a leftover
        (DAE) species where possible.
        """
        logging.info("Starting ODE generation...")
        self.ODEs = []
        self.wrappedODEs = []
        self.speciesDAE = []
        index = 0  # running index shared by all generated ODEWrappers
        for speciesEntity in self.mainModel.SbmlSpecies:
            if not speciesEntity.isDefiningOde():
                continue
            ode = self._odeFromReactions(speciesEntity)
            if ode:
                ode.DAE = False
                ode.speciesEntity = speciesEntity
                self.ODEs.append(ode)
                wrappedODE = ODEWrapper(index, mathNode=ode, mainModel=self.mainModel, id=speciesEntity.getId(),
                                        speciesEntity=speciesEntity)
                self.wrappedODEs.append(wrappedODE)
                index += 1
            else:
                # No ODE could be built: remember the species for pairing
                # with an algebraic rule below.
                self.speciesDAE.append(speciesEntity)
#            self.speciesDAE.insert(0,speciesEntity)
        # 09.08.12 td: adding the handling for Algebraic Rules (resulting in a DAE system)
        for algRule in self.mainModel.SbmlAlgebraicRules:
            if self.speciesDAE:
                # Pair the rule with the most recently collected DAE species.
                speciesEntity = self.speciesDAE.pop()
                id = speciesEntity.getId()
            else:
                id = algRule.getLabel()
            ode = algRule.Item.getMath()
            if ode:
                ode.DAE = True
                ode.speciesEntity = algRule
                self.ODEs.append(ode)
                wrappedODE = ODEWrapper(index, mathNode=ode, mainModel=self.mainModel, id=id,
                                        speciesEntity=algRule)
                self.wrappedODEs.append(wrappedODE)
                index += 1
        # 09.08.12 td
    def _odeFromReactions(self, speciesEntity):
        """
        Parses through all Reactions of the model that involve the given
        Species and sums up the stoichiometry, etc.

        :param speciesEntity: wrapped SBML species whose rate law is built
        :return: a libsbml ASTNode for the ODE, or None (e.g. when a
            reaction lacks a kinetic law)
        """
        species = speciesEntity.Item
        ode = None

        #search for the species in all reactions, and
        #add up the kinetic laws * stoichiometry for
        #all consuming and producing reactions to
        #an ODE

        for reactionEntity in self.mainModel.SbmlReactions:
            reaction = reactionEntity[0].Item
            # Symbol node standing in for the reaction's rate expression.
            reactionSymbol = libsbml.ASTNode()
            reactionSymbol.setName(reaction.getId())

            kineticLaw = reaction.getKineticLaw()
            if not kineticLaw:
                # Aborts the whole ODE (returns None implicitly).
                logging.error("The model has no kinetic law for reaction %s" % reaction.getId())
                return

            reactantReferences = reaction.getListOfReactants()
            # NOTE(review): a reaction with no reactants is skipped entirely
            # here, so its *products* never contribute either — confirm this
            # is intended for pure creation reactions.
            if (reactantReferences == None
                or len(reactantReferences) == 0):
                continue
            for reactantReference in reactantReferences:
                reactantWrapper = self.mainModel.dictOfSpecies[reactantReference.getSpecies()]
                if reactantWrapper.Item == species:
                    # Construct expression for reactant by multiplying the
                    # kinetic law with stoichiometry (math) and putting a
                    # minus in front of it
                    if reactantReference.isSetStoichiometryMath():
                        reactant = libsbml.ASTNode()
                        reactant.setCharacter("*")
                        tmp = reactantReference.getStoichiometryMath().getMath()
                        reactant.addChild(self.copyAST(tmp))
                        reactant.addChild(self.copyAST(reactionSymbol))
                    else:
                        # Stoichiometry 1 needs no multiplication node.
                        if reactantReference.getStoichiometry() == 1.0:
                            reactant = self.copyAST(reactionSymbol)
                        else:
                            reactant = libsbml.ASTNode()
                            reactant.setCharacter("*")
                            tmp = libsbml.ASTNode()
                            tmp.setValue(reactantReference.getStoichiometry())
                            reactant.addChild(tmp)
                            reactant.addChild(self.copyAST(reactionSymbol))

                    # Add reactant expression to ODE (consumption: subtract).
                    if not ode:
                        ode = libsbml.ASTNode()
                        ode.setCharacter("-")
                        ode.addChild(reactant)
                    else:
                        tmp = self.copyAST(ode)
                        ode = libsbml.ASTNode()
                        ode.setCharacter("-")
                        ode.addChild(tmp)
                        ode.addChild(reactant)

            productReferences = reaction.getListOfProducts()
            if (productReferences == None
                or len(productReferences) == 0):
                continue
            for productReference in productReferences:
                try:
                    reactantWrapper = self.mainModel.dictOfSpecies[productReference.getSpecies()]
                except KeyError, e:
                    logging.error("Could not create ODEs. Species is missing: %s" % e)
                    raise
                if reactantWrapper.Item == species:
                    reactant = libsbml.ASTNode()
                    reactant.setCharacter("*")
                    if productReference.isSetStoichiometryMath():
                        tmp = productReference.getStoichiometryMath().getMath()
                        reactant.addChild(self.copyAST(tmp))
                    else:
                        tmp = libsbml.ASTNode()
                        tmp.setValue(productReference.getStoichiometry())
                        reactant.addChild(tmp)
                    reactant.addChild(self.copyAST(reactionSymbol))

                    # Add reactant expression to ODE (production: add).
                    if not ode:
                        ode = reactant
                    else:
                        tmp = self.copyAST(ode)
                        ode = libsbml.ASTNode()
                        ode.setCharacter("+")
                        ode.addChild(tmp)
                        ode.addChild(reactant)

#        # TODO: Reenable this! Stoichiometry won't work correctly without it.
#
#        # Divide ODE by Name of the species' compartment.
#        # If formula is empty skip division by compartment and set formula
#        # to 0. The latter case can happen, if a species is neither
#        # constant nor a boundary condition but appears only as a modifier
#        # in reactions. The rate for such species is set to 0.
#        if ode:
#            #compartment = self.mainModel.SbmlModel.getCompartmentById(species.getCompartment())
#            for compartmentEntity in self.mainModel.SbmlCompartments:
#                compartment = compartmentEntity.Item
#                if compartment.getId() == species.getCompartment():
#                    tmp = self.copyAST(ode)
#                    ode = libsbml.ASTNode()
#                    ode.setCharacter("/")
#                    ode.addChild(tmp)
#                    temp = libsbml.ASTNode()
#                    temp.setName(compartment.getId())
#                    ode.addChild(temp)
#        else:
#            # for modifier species that never appear as products or reactants
#            # but are not defined as constant or boundarySpecies, set ODE to 0.
#            ode = libsbml.ASTNode()
#            ode.setValue(0) # change for DAE models should be defined by algebraic rule!
#        simpleOde = self.simplifyAST(ode) # does not yet work correctly
#        return simpleOde
        return ode
def copyAST(self, original):
"""
Copies the passed AST, including potential SOSlib ASTNodeIndex, and
returns the copy.
"""
copy = libsbml.ASTNode()
# Distinction of cases
#integers, reals
if original.isInteger():
copy.setValue(original.getInteger())
elif original.isReal():
copy.setValue(original.getReal())
# variables
elif original.isName():
#if original.isSetIndex():
if original is ASTIndexNameNode:
copy = ASTIndexNameNode()
copy.setIndex(original.getIndex())
copy.setName(original.getName())
# time and delay nodes
copy.setType(original.getType())
#if original.isSetData():
if original is ASTIndexNameNode and original.getData is not None:
copy.setData()
# constants, functions, operators
else:
copy.setType(original.getType())
# user-defined functions: name must be set
if original.getType() == libsbml.AST_FUNCTION:
copy.setName(original.getName())
for i in xrange(original.getNumChildren()):
copy.addChild(self.copyAST(original.getChild(i)))
return copy
def simplifyAST(self, org):
"""
Takes an AST f, and returns a simplified copy of f.
decomposes n-ary `times' and `plus' nodes into an AST of binary AST.
simplifies (arithmetic) operations involving 0 and 1: \n
-0 -> 0;\n
x+0 -> x, 0+x -> x;\n
x-0 -> x, 0-x -> -x;\n
x*0 -> 0, 0*x -> 0, x*1 -> x, 1*x -> x;\n
0/x -> 0, x/1 -> x;\n
x^0 -> 1, x^1 -> x, 0^x -> 0, 1^x -> 1;\n
propagates unary minuses\n
--x -> x; \n
-x + -y -> -(x+y), -x + y -> y-x, x + -y -> x-y; \n
-x - -y -> y-x, -x - y -> -(x+y), x - -y -> x+y; \n
-x * -y -> x*y, -x * y -> -(x*y), x * -y -> -(x*y);\n
-x / -y -> x/y, -x / y -> -(x/y), x / -y -> -(x/y); \n
calls evaluateAST(subtree), if no variables or user-defined
functions occur in the AST subtree,
calls itself recursively for childnodes.
"""
# new ASTNode
simple = libsbml.ASTNode()
nodeType = org.getType()
# DISTINCTION OF CASES
# integers, reals
if org.isInteger():
simple.setValue(org.getInteger())
elif org.isReal():
simple.setValue(org.getReal())
# variables
elif org.isName():
if org is ASTIndexNameNode:
simple = ASTIndexNameNode()
simple.setIndex(org.getIndex())
if org.isSetData():
simple.setData()
simple.setName(org.getName())
simple.setType(org.getType())
# --------------- operators with possible simplifications --------------
# special operator: unary minus
elif org.isUMinus():
left = self.simplifyAST(org.getLeftChild())
if self.zero(left): # -0 = 0
simple = left
elif left.isUMinus(): # - -x
simple = self.cutRoot(left)
else:
simple.setType(libsbml.AST_MINUS)
simple.addChild(left)
# general operators
elif org.isOperator() or nodeType == libsbml.AST_FUNCTION_POWER:
numOfChildren = org.getNumChildren()
# zero operands: set to neutral element
if numOfChildren == 0:
if nodeType == libsbml.AST_PLUS:
simple.setValue(0)
elif nodeType == libsbml.AST_TIMES:
simple.setValue(1)
#one operand: set node to operand
elif numOfChildren == 1:
if nodeType == libsbml.AST_PLUS:
simple = self.simplifyAST(org.getChild(0))
elif nodeType == libsbml.AST_TIMES:
simple = self.simplifyAST(org.getChild(0))
#>2 operands: recursively decompose
# into tree with 2 operands
elif numOfChildren > 2:
if nodeType == libsbml.AST_PLUS:
simple.setType(libsbml.AST_PLUS)
elif nodeType == libsbml.AST_TIMES:
simple.setType(libsbml.AST_TIMES)
#copy/simplify left child ...
simple.addChild(self.simplifyAST(org.getChild(0)))
# ... and move other child down
helper = libsbml.ASTNode()
helper.setType(nodeType)
for i in xrange(numOfChildren):
helper.addChild(self.simplifyAST(org.getChild(i)))
simple.addChild(self.simplifyAST(helper))
# 2 operands: remove 0s and 1s and unary minuses
else:
left = self.simplifyAST(org.getLeftChild())
right = self.simplifyAST(org.getRightChild())
# default: simplification
simplify = 1 # set flag
if nodeType == libsbml.AST_PLUS:
# binary plus x + y
if self.zero(right): # x+0 = x
simple = left
elif self.zero(left): # 0+x=x
simple = right
elif left.isUMinus() and right.isUMinus():
# -x + -y
simple.setType(libsbml.AST_MINUS)
simple.addChild(libsbml.ASTNode())
helper = simple.getChild(0)
helper.setType(libsbml.AST_PLUS)
helper.addChild(self.cutRoot(left))
helper.addChild(self.cutRoot(right))
elif left.isUMinus():
# -x + y
simple.setType(libsbml.AST_MINUS)
simple.addChild(right)
simple.addChild(self.cutRoot(left))
elif right.isUMinus():
# x + -y
simple.setType(libsbml.AST_MINUS)
left.addChild(left)
simple.addChild(self.cutRoot(right))
else:
simplify = 0
elif nodeType == libsbml.AST_MINUS:
# binary minus x - y
if self.zero(right):
# x-0 = x
simple = left
elif self.zero(left):
# 0-x = -x
simple.setType(nodeType)
simple.addChild(right)
elif left.isUMinus() and right.isUMinus():
# -x - -y
simple.setType(libsbml.AST_MINUS)
simple.addChild(self.cutRoot(right))
simple.addChild(self.cutRoot(left))
elif left.isUMinus():
# -x - y
simple.setType(libsbml.AST_MINUS)
simple.addChild(libsbml.ASTNode())
helper = simple.getChild(0)
helper.setType(libsbml.AST_PLUS)
helper.addChild(self.cutRoot(left))
helper.addChild(right)
elif right.isUMinus():
# x - -y
simple.setType(libsbml.AST_PLUS)
simple.addChild(left)
simple.addChild(self.cutRoot(right))
else:
simplify = 0
elif nodeType == libsbml.AST_TIMES:
# binary times x * y
if self.zero(right):
# x*0 = 0
simple = right
elif self.zero(left):
# 0*x = 0
simple = left
elif self.one(right):
# x*1 = x
simple = left
elif self.one(left):
# 1*x = x
simple = right
elif left.isUMinus() and right.isUMinus():
# -x * -y
simple.setType(libsbml.AST_TIMES)
simple.addChild(self.cutRoot(left))
simple.addChild(self.cutRoot(right))
elif left.isUMinus():
# -x * y
simple.setType(libsbml.AST_MINUS)
simple.addChild(libsbml.ASTNode())
helper = simple.getChild(0)
helper.setType(libsbml.AST_TIMES)
helper.addChild(self.cutRoot(left))
helper.addChild(right)
elif right.isUMinus():
# x * -y
simple.setType(libsbml.AST_MINUS)
simple.addChild(libsbml.ASTNode())
helper = simple.getChild(0)
helper.setType(libsbml.AST_TIMES)
helper.addChild(left)
helper.addChild(self.cutRoot(right))
else:
simplify = 0
elif nodeType == libsbml.AST_DIVIDE:
# binary divide x / y
if self.zero(left):
# 0/x = 0
simple = left
elif self.one(right):
# x/1 = x
simple = left
elif left.isUMinus() and right.isUMinus():
# -x / -y
simple.setType(libsbml.AST_DIVIDE)
simple.addChild(self.cutRoot(left))
simple.addChild(self.cutRoot(right))
elif left.isUMinus():
# -x / y
simple.setType(libsbml.AST_MINUS)
simple.addChild(libsbml.ASTNode())
helper = simple.getChild(0)
helper.setType(libsbml.AST_DIVIDE)
helper.addChild(self.cutRoot(left))
helper.addChild(right)
elif right.isUMinus():
# x / -y
simple.setType(libsbml.AST_MINUS)
simple.addChild(libsbml.ASTNode())
helper = simple.getChild(0)
helper.setType(libsbml.AST_DIVIDE)
helper.addChild(left)
helper.addChild(self.cutRoot(right))
else:
simplify = 0
elif nodeType == libsbml.AST_POWER or nodeType == libsbml.AST_FUNCTION_POWER:
# power x^y
if self.zero(right):
# x^0 = 1
simple.setValue(1.0)
elif self.one(right):
# x^1 = x
simple = left
elif self.zero(left):
# 0^x = 0
simple.setValue(0.0)
elif self.one(left):
# 1^x = 1
simple.setValue(1.0)
else:
simplify = 0
else:
logging.error("simplifyAST: unknown failure for operator nodeType")
if not simplify:
# after all, no simplification
simple.setType(nodeType)
simple.addChild(left)
simple.addChild(right)
# -------------------- cases with no simplifications -------------------
# constants (leaves)
# functions, operators (branches)
else:
simple.setType((nodeType))
# user-defined functions: name must be set
if org.getType() == libsbml.AST_FUNCTION:
simple.setName(org.getName())
for i in xrange(org.getNumChildren):
simple.addChild(self.simplifyAST(org.getChild(i)))
return simple
def zero(self, f):
"""
Small helper function to determine if the value of a node is zero.
TODO: Is this needed in Python?
"""
if f.isReal():
return f.getReal() == 0.0
if f.isInteger():
return f.getInteger() == 0
return 0
def one (self, f):
"""
Small helper function to determine if the value of a node is one.
TODO: Is this needed in Python?
"""
if f.isReal():
return f.getReal() == 1.0
if f.isInteger():
return f.getInteger() == 1
return 0
    def cutRoot(self, old):
        """Return a copy of *old*'s first child, i.e. drop the root node
        (used above to strip a unary minus)."""
        return self.copyAST(old.getChild(0))
def getNameNodes(self, mathNode, nameNodes):
"""
Recursively gets all children ASTNodes of mathNode that
are of type "Name".
"""
if mathNode.isName():
nameNodes.append(mathNode)
numChildren = mathNode.getNumChildren()
for i in xrange(numChildren):
child = mathNode.getChild(i)
self.getNameNodes(child, nameNodes)
# TODO:
#/** Returns true (1) if the ASTNode is an ASTIndexNameNode
# */
#SBML_ODESOLVER_API int ASTNode_isIndexName(ASTNode_t *node)
#{
# return dynamic_cast<ASTIndexNameNode*>(node) != 0;
#}
#
#/** Returns true (1) if the an indexed ASTNode (ASTIndexNameNode) has
# it's index set
#*/
#SBML_ODESOLVER_API unsigned int ASTNode_isSetIndex(ASTNode_t *node)
#{
# return ASTNode_isIndexName(node) && static_cast<ASTIndexNameNode*>(node)->isSetIndex();
#}
class ASTIndexNameNode(libsbml.ASTNode):
    """AST name node carrying an additional integer index and an optional
    data payload (Python counterpart of SOSlib's ASTIndexNameNode, cf. the
    C snippets commented out around this class)."""
    def __init__(self):
        # NOTE(review): libsbml.ASTNode.__init__ is never invoked here —
        # presumably intentional, but verify the SWIG base class is safe to
        # leave uninitialized.
        self.index = -1   # -1 means "index not set"
        self.data = None  # None means "no data payload"
    def getIndex(self):
        return self.index
    def setIndex(self, index):
        self.index = index
    def getData(self):
        return self.data
    def setData(self, data):
        self.data = data
#TODO:
#
#/* appends the symbols in the given AST to the given list.
# 'char *' strings are appended to the list these strings
# should not be freed and exist as long as the AST. */
#void ASTNode_getSymbols(ASTNode_t *node, List_t *symbols)
#{
# int i ;
#
# if ( ASTNode_getType(node) == AST_NAME )
# List_add(symbols, (char*) ASTNode_getName(node));
#
# for ( i=0; i<ASTNode_getNumChildren(node); i++ )
# ASTNode_getSymbols(ASTNode_getChild(node, i), symbols);
#}
#/* appends the indices in the given indexed AST to the given list. */
#int ASTNode_getIndices(ASTNode_t *node, List_t *indices)
#{
# int i;
#
# if ( ASTNode_isSetIndex(node) )
# {
# int *idx;
# ASSIGN_NEW_MEMORY(idx, int, 0);
# *idx = ASTNode_getIndex(node);
# List_add(indices, idx);
# }
#
# for ( i=0; i<ASTNode_getNumChildren(node); i++ )
# ASTNode_getIndices(ASTNode_getChild(node, i), indices);
#
# return 1;
#}
|
CSB-at-ZIB/BioPARKIN
|
src/odehandling/odegenerator.py
|
Python
|
lgpl-3.0
| 24,889
|