text stringlengths 4 1.02M | meta dict |
|---|---|
"""empty message
Revision ID: 932c6631a9d2
Revises: 17dc0b76c2c6
Create Date: 2016-10-14 13:13:58.447691
"""
# revision identifiers, used by Alembic.
revision = "932c6631a9d2"
down_revision = "17dc0b76c2c6"
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: add a nullable 'site' string column to 'entry'."""
    site_column = sa.Column("site", sa.String(length=1024), nullable=True)
    op.add_column("entry", site_column)
def downgrade():
    """Revert the migration: drop the 'site' column from 'entry'."""
    op.drop_column("entry", "site")
| {
"content_hash": "1245087e59e56fecff9185e71bc3d0c3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 84,
"avg_line_length": 23.423076923076923,
"alnum_prop": 0.6880131362889984,
"repo_name": "DBeath/flask-feedrsub",
"id": "a13829ff7c976f8f12f83725aa3b7f8888703648",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/932c6631a9d2_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4337"
},
{
"name": "Dockerfile",
"bytes": "1105"
},
{
"name": "HTML",
"bytes": "60608"
},
{
"name": "JavaScript",
"bytes": "24058"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "815501"
},
{
"name": "Shell",
"bytes": "6364"
}
],
"symlink_target": ""
} |
import os
import json
import string
import readline
import itertools
import collections
# Directory layout: this file lives in test/, with src/ as a sibling.
TEST_DIR = os.path.dirname(__file__)
ROOT_DIR = os.path.dirname(TEST_DIR)
SRC_DIR = os.path.join(ROOT_DIR, "src")  # NOTE(review): unused in this file -- confirm before removing

# Bidirectional mapping between element-list addresses and their debug names,
# registered by the element_list_t constructor ops.
addr_to_name = {}
name_to_addr = {}
# Replayed list contents keyed by address; unknown addresses start empty.
element_lists = collections.defaultdict(list)
# Names registered via 'point' ops; used by the b/c breakpoint commands.
breakpoints = set()
def completer(text, state):
    """Readline completer over list names, list addresses and breakpoints.

    Returns the state-th candidate starting with `text`, or None when
    `state` runs past the candidates.
    """
    candidates = itertools.chain(name_to_addr, element_lists, breakpoints)
    matches = [name for name in candidates if name.startswith(text)]
    try:
        return matches[state]
    except IndexError:
        return None
readline.set_completer(completer)
# libedit (the readline shim shipped on OS X) needs a different binding syntax
# than GNU readline.
if 'libedit' in readline.__doc__:
    readline.parse_and_bind('bind ^I rl_complete')
else:
    readline.parse_and_bind('tab: complete')

# Parse the scanner dump: one "input ..." line holds the raw scanned text;
# every other line is one recorded operation (whitespace-separated fields).
input_line = ''
ops = []
for line in open("scanned.dpt"):
    if line.startswith("input "):
        input_line = line[6:-1]  # strip the "input " prefix and trailing newline
    else:
        # Python 2: map() returns a list; string.strip is the function form.
        ops.append(map(string.strip, line.split()))
def getstr(capture):
    """Resolve a 'start:end' capture spec into the matching input substring."""
    lo, hi = capture.split(':')
    return input_line[int(lo):int(hi)]
def printlist(name_or_addr):
if name_or_addr in name_to_addr:
print "(%s) %s" % (name_or_addr, element_lists[name_to_addr[name_or_addr]])
elif name_or_addr in element_lists:
print "(%s) %s" % (addr_to_name.get(name_or_addr, name_or_addr),
element_lists[name_or_addr])
else:
print "error: unknown list --", name_or_addr
def handleop(fields):
addr = fields[0]
loc = fields[1].split(':')
method_name = fields[2]
method_args = fields[3:]
if addr == '0x0':
el = None
else:
el = element_lists[addr]
if method_name == 'element_list_t':
addr_to_name[addr] = method_args[0]
name_to_addr[method_args[0]] = addr
elif method_name == '~element_list_t':
pass
elif method_name == 'push_back':
el.append((method_args[0], getstr(method_args[1])))
elif method_name == 'pop_front':
el.pop(0)
elif method_name == 'pop_back':
el.pop()
elif method_name == 'clear2':
el[::] = []
elif method_name == 'splice':
pos = int(method_args[0])
other = element_lists[method_args[1]]
start, from_end = map(int, method_args[2].split(':'))
end = len(other) - from_end
sub_list = other[start:end]
del other[start:end]
el[pos:pos] = sub_list
elif method_name == 'point':
breakpoints.add(method_args[0])
else:
print "Unhandled method: ", method_name
def playupto(length):
    """Reset the replay state and re-apply the first `length` operations."""
    addr_to_name.clear()
    name_to_addr.clear()
    element_lists.clear()
    for position in range(length):
        handleop(ops[position])
def find_prev_point(start, name):
    """Scan backwards from `start` for the previous 'point' op.

    When `name` is non-empty only a point with that name matches. Returns
    the replay index just past the match, or start + 1 when none exists.
    """
    fallback = start + 1
    pos = start
    while pos > 0:
        pos -= 1
        fields = ops[pos]
        if fields[2] == 'point' and (not name or fields[3] == name):
            return pos + 1
    return fallback
def find_next_point(start, name):
    """Scan forwards from `start` for the next 'point' op.

    When `name` is non-empty only a point with that name matches. Returns
    the replay index just past the match, or start + 1 when none exists.
    """
    orig_start = start
    # BUG FIX: the original looped `while start < len(ops)` and then read
    # ops[start + 1], so it raised IndexError (reading ops[len(ops)]) whenever
    # no matching point existed ahead of `start`. Stop one element earlier so
    # the fallback return is reachable, mirroring find_prev_point.
    while start < len(ops) - 1:
        start += 1
        fields = ops[start]
        if fields[2] != 'point':
            continue
        if not name or fields[3] == name:
            return start + 1
    return orig_start + 1
# --- Interactive replay loop (gdb-like) --------------------------------
# `index` is the number of ops currently applied; start fully replayed.
index = len(ops)
last_cmd = ['']     # previous command; an empty input line repeats it
watch_list = set()  # list names re-printed after every step
while True:
    # Rebuild the whole state from scratch up to the current position.
    playupto(index)
    if index == 0:
        print "init"
    else:
        # Show the op that was just applied; for push_back also show the
        # captured substring (field 4 is the capture spec).
        op = ops[index - 1]
        print "#%s %s" % (index -1, op)
        if op[2] == 'push_back':
            print getstr(op[4])
    for list_name in watch_list:
        printlist(list_name)
    try:
        cmd = raw_input("> ").split()
    except EOFError:
        print
        break
    # Empty input repeats the previous command.
    if not cmd or cmd[0] == '':
        cmd = last_cmd
    if not cmd or cmd[0] == '':
        pass
    elif cmd[0] == 'q':  # quit
        break
    elif cmd[0] == 'n':  # next: step forward one op
        if index < len(ops):
            index += 1
    elif cmd[0] == 'r':  # reverse: step back one op
        if index > 0:
            index -= 1
    elif cmd[0] == 'b':  # back to the previous 'point' (optionally named)
        if len(cmd) == 1:
            cmd.append('')
        index = find_prev_point(index - 1, cmd[1])
    elif cmd[0] == 'c':  # continue to the next 'point' (optionally named)
        if len(cmd) == 1:
            cmd.append('')
        index = find_next_point(index - 1, cmd[1])
    elif cmd[0] == 'p':  # print one list by name/address, or everything
        if len(cmd) > 1:
            printlist(cmd[1])
        else:
            print input_line
            for addr in element_lists:
                printlist(addr)
    elif cmd[0] == 'w':  # watch a list (printed after each step)
        watch_list.add(cmd[1])
    elif cmd[0] == 'u':  # unwatch a list
        # NOTE(review): raises IndexError with no argument and KeyError for an
        # unwatched name -- discard() would be safer; confirm before changing.
        if watch_list:
            watch_list.remove(cmd[1])
    else:
        print "error: unknown command --", cmd
    last_cmd = cmd
| {
"content_hash": "78d6060f8117ec5d497d0c430e481792",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 83,
"avg_line_length": 25.795580110497237,
"alnum_prop": 0.5322338830584707,
"repo_name": "acleasby/lnav",
"id": "9267444de320bddbf568a0248747f3fd9a25bf2e",
"size": "6198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/parser_debugger.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "199634"
},
{
"name": "C++",
"bytes": "2133903"
},
{
"name": "CMake",
"bytes": "3455"
},
{
"name": "Groff",
"bytes": "25621"
},
{
"name": "Makefile",
"bytes": "1074"
},
{
"name": "Objective-C",
"bytes": "2887"
},
{
"name": "Python",
"bytes": "9684"
},
{
"name": "Ruby",
"bytes": "3364"
},
{
"name": "Shell",
"bytes": "101457"
}
],
"symlink_target": ""
} |
from django.conf.urls import url

from multilingual_survey import views

# NOTE(review): the slug pattern also matches the literal single segment
# 'success/', so a bare /success/ (without a uuid) routes to survey_form
# with slug='success' -- confirm this is intended, or list 'success' first.
urlpatterns = [
    url(r'^(?P<slug>[\w-]+)/$', views.survey_form, name='form'),
    url(r'^success/(?P<uuid>\w+)/$', views.survey_success, name='success'),
]
| {
"content_hash": "5498e28532d2e389de6ff5b81e56e1e7",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 75,
"avg_line_length": 29,
"alnum_prop": 0.646551724137931,
"repo_name": "diadzine/django-simple-multilingual-survey",
"id": "5962b50553b092cc3cd51061b0e946554eb393df",
"size": "232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "multilingual_survey/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "17009"
},
{
"name": "Python",
"bytes": "12346"
}
],
"symlink_target": ""
} |
"""Implements a feed-forward neural net."""
import gzip
import logging
import sys
import time
import csv
from google.protobuf import text_format
from memory_profiler import *
from datahandler import *
from convolutions import *
from edge import *
from layer import *
from util import *
from logistic_layer import *
from tanh_layer import *
from relu_layer import *
from smooth_relu_layer import *
from linear_layer import *
from softmax_layer import *
from replicated_softmax_layer import *
from cos_layer import *
from sin_layer import *
from transfer_edge import *
from soft_transfer_edge import *
from neuralnet import *
import eigenmat as mat
class ControlledDropoutNet(object):
def __init__(self, net, small_net=None, t_op=None, e_op=None):
    """Build a controlled-dropout trainer around a full net and a small net.

    Args:
      net: deepnet_pb2.Model proto, or a path to one on disk.
      small_net: model (or path) for the reduced network trained each step.
      t_op: deepnet_pb2.Operation (or path) describing training.
      e_op: deepnet_pb2.Operation (or path) describing evaluation.
    """
    self.net = None
    if isinstance(net, deepnet_pb2.Model):
        self.net = net
    elif isinstance(net, str) or isinstance(net, unicode):
        self.net = ReadModel(net)
    self.t_op = None
    if isinstance(t_op, deepnet_pb2.Operation):
        self.t_op = t_op
    # BUG FIX: the original tested isinstance(net, unicode) here (copy-paste
    # from the `net` branch), so a unicode t_op path was only read when `net`
    # happened to be unicode.
    elif isinstance(t_op, str) or isinstance(t_op, unicode):
        self.t_op = ReadOperation(t_op)
    self.e_op = None
    if isinstance(e_op, deepnet_pb2.Operation):
        self.e_op = e_op
    # BUG FIX: same copy-paste (`net` instead of `e_op`) as above.
    elif isinstance(e_op, str) or isinstance(e_op, unicode):
        self.e_op = ReadOperation(e_op)
    cm.CUDAMatrix.init_random(self.net.seed)
    np.random.seed(self.net.seed)
    self.data = None
    self.layer = []  # Layer objects (hold the biases)
    self.edge = []   # Edge objects (hold the weights)
    self.input_datalayer = []
    self.output_datalayer = []
    self.datalayer = []
    self.tied_datalayer = []
    self.unclamped_layer = []
    self.verbose = False
    self.batchsize = 0
    if self.t_op:
        self.verbose = self.t_op.verbose
        self.batchsize = self.t_op.batchsize
    elif self.e_op:
        self.verbose = self.e_op.verbose
        self.batchsize = self.e_op.batchsize
    self.train_stop_steps = sys.maxint
    # The small net is trained each step; its parameters are scattered back
    # into this full net by UpdateOriginalNet().
    self.small_net = NeuralNet(small_net, False, t_op, e_op)
    # self.small_net_cd = NeuralNet(small_net, True, t_op, e_op)
    # Per-hidden-layer column indices selected for the current dropout step.
    self.randNum = []
def PrintNetwork(self):
    """Dump every layer's name and its neighbouring layers to stdout."""
    for layer in self.layer:
        print layer.name
        layer.PrintNeighbours()
def DeepCopy(self):
    """Return an independent copy of the underlying model proto."""
    return CopyModel(self.net)
def LoadModelOnGPU(self, batchsize=-1):
    """Load the model on the GPU.

    Instantiates Layer/Edge objects from the model proto, merging each
    layer's/edge's hyperparams with the net-level defaults and resolving
    tied layers/edges to the objects they share parameters with.

    Args:
      batchsize: batch size to allocate for; when negative, taken from the
        training (preferred) or evaluation operation.
    """
    if batchsize < 0:
        if self.t_op:
            batchsize = self.t_op.batchsize
        else:
            batchsize = self.e_op.batchsize
    for layer in self.net.layer:
        # Fill in any hyperparams the layer proto leaves unset from the
        # network-level defaults.
        layer.hyperparams.MergeFrom(LoadMissing(layer.hyperparams,
                                                self.net.hyperparams))
        if not layer.prefix:
            layer.prefix = self.net.prefix
        tied_to = None
        if layer.tied:
            # Assumes the tied-to layer appears earlier in the proto.
            tied_to = next(l for l in self.layer if l.name == layer.tied_to)
        self.layer.append(CreateLayer(Layer, layer, self.t_op, tied_to=tied_to))
    for edge in self.net.edge:
        # Merge order: net-level defaults first, then edge-specific values win.
        hyp = deepnet_pb2.Hyperparams()
        hyp.CopyFrom(self.net.hyperparams)
        hyp.MergeFrom(edge.hyperparams)
        edge.hyperparams.MergeFrom(hyp)
        try:
            node1 = next(layer for layer in self.layer if layer.name == edge.node1)
        except StopIteration:
            # Debug aid: show the missing endpoint and the known layer names.
            # NOTE(review): node1 stays unbound after this and CreateEdge below
            # will raise NameError -- confirm this is acceptable fail-fast.
            print edge.node1, [l.name for l in self.layer]
        node2 = next(layer for layer in self.layer if layer.name == edge.node2)
        if not edge.prefix:
            edge.prefix = self.net.prefix
        tied_to = None
        if edge.tied:
            tied_to = next(
                e for e in self.edge if e.node1.name == edge.tied_to_node1 and e.node2.name == edge.tied_to_node2)
        self.edge.append(CreateEdge(Edge, edge, node1, node2, self.t_op, tied_to=tied_to))
    self.input_datalayer = [node for node in self.layer if node.is_input]
    self.output_datalayer = [node for node in self.layer if node.is_output]
    self.node_list = self.Sort()
def ExchangeGlobalInfo(self):
    """Hand every layer and edge a reference back to this network."""
    for component in self.layer + self.edge:
        component.GetGlobalInfo(self)
def Sort(self):
    """Topological sort.

    Orders the layers so that every layer appears after all of its fprop
    inputs (Kahn's algorithm, using each edge's `marker` flag as the
    visited bit).

    Returns:
      The layers in forward-propagation order.
    Raises:
      Exception: if the network graph contains a cycle.
    """
    node_list = []
    # Seed with layers that have no incoming edges (the data layers).
    S = [node for node in self.layer if not node.incoming_neighbour]
    while S:
        n = S.pop()
        node_list.append(n)
        for m in n.outgoing_edge:
            if m.marker == 0:
                m.marker = 1
                # Enqueue the target once every one of its incoming edges
                # has been marked.
                if reduce(lambda a, edge: a and edge.marker == 1,
                          m.node2.incoming_edge, True):
                    S.append(m.node2)
    # Every edge marked <=> the graph was acyclic.
    if reduce(lambda a, edge: a and edge.marker == 1, self.edge, True):
        if self.verbose:
            print 'Fprop Order:'
            for node in node_list:
                print node.name
    else:
        raise Exception('Invalid net for backprop. Cycle exists.')
    return node_list
def ComputeUp(self, layer, train=False, step=0, maxsteps=0):
    """
    Computes the state of `layer', given the state of its incoming neighbours.

    Args:
      layer: Layer whose state is to be computed.
      train: True if this computation is happening during training, False during evaluation.
      step: Training step.
      maxsteps: Maximum number of steps that will be taken (Needed because some
        hyperparameters may depend on this).
    Returns:
      A deepnet_pb2.Metrics with sparsity stats when the layer uses a
      sparsity penalty, otherwise None.
    """
    layer.dirty = False
    perf = None
    if layer.is_input or layer.is_initialized:
        layer.GetData()
    else:
        for i, edge in enumerate(layer.incoming_edge):
            if edge in layer.outgoing_edge:
                continue
            inputs = layer.incoming_neighbour[i].state
            if edge.conv or edge.local:
                if i == 0:
                    ConvolveUp(inputs, edge, layer.state)
                else:
                    AddConvoleUp(inputs, edge, layer.state)
            else:
                w = edge.params['weight']
                factor = edge.proto.up_factor
                if i == 0:
                    # First edge overwrites: state = w^T . inputs.
                    cm.dot(w.T, inputs, target=layer.state)
                    if factor != 1:
                        layer.state.mult(factor)
                else:
                    # Subsequent edges accumulate into the state.
                    layer.state.add_dot(w.T, inputs, mult=factor)
        b = layer.params['bias']
        if layer.replicated_neighbour is None:
            layer.state.add_col_vec(b)
        else:
            layer.state.add_dot(b, layer.replicated_neighbour.NN)
        layer.ApplyActivation()
        if layer.hyperparams.sparsity:
            layer.state.sum(axis=1, target=layer.dimsize)
            perf = deepnet_pb2.Metrics()
            perf.MergeFrom(layer.proto.performance_stats)
            perf.count = layer.batchsize
            perf.sparsity = layer.dimsize.sum() / layer.dimsize.shape[0]
        if layer.hyperparams.dropout:  # If there is dropout option in the hyperparams
            if train and maxsteps - step >= layer.hyperparams.stop_dropout_for_last:
                # Randomly set states to zero.
                if layer.hyperparams.mult_dropout:
                    # Multiplicative (Gaussian) dropout: state *= N(1, 1).
                    layer.mask.fill_with_randn()
                    layer.mask.add(1)
                    layer.state.mult(layer.mask)
                else:
                    # Bernoulli dropout: keep units with prob 1 - dropout_prob.
                    layer.mask.fill_with_rand()
                    layer.mask.greater_than(layer.hyperparams.dropout_prob)
                    if layer.hyperparams.blocksize > 1:
                        layer.mask.blockify(layer.hyperparams.blocksize)
                    layer.state.mult(layer.mask)
            else:
                # Produce expected output.
                if layer.hyperparams.mult_dropout:
                    pass
                else:
                    layer.state.mult(1.0 - layer.hyperparams.dropout_prob)
            # For Controlled Dropout, multiply 0.5 to produce expected output
            # (half of the columns are dropped during training).
            if not train:
                # layer.state.mult(0.5)
                # NOTE(review): activation == 3 is presumably the hidden-layer
                # activation enum value -- confirm against deepnet_pb2.
                if layer.activation == 3:  # when it is hidden layer
                    # Controlled dropout
                    layer.state.mult(0.5)
    return perf
def ComputeDown(self, layer, step):
    """Backpropagate through this layer.

    Args:
      layer: Layer to backpropagate through.
      step: The training step. Needed because some hyperparameters depend on
        which training step they are being used in.
    Returns:
      The layer's loss when it is an output layer, otherwise None.
    """
    if layer.is_input:  # Nobody to backprop to.
        return
    # At this point layer.deriv contains the derivative with respect to the
    # outputs of this layer. Compute derivative with respect to the inputs.
    if layer.is_output:
        loss = layer.GetLoss(get_deriv=True)
    else:
        loss = None
        if layer.hyperparams.sparsity:
            sparsity_gradient = layer.GetSparsityGradient()
            layer.deriv.add_col_vec(sparsity_gradient)
        layer.ComputeDeriv()
    # Now layer.deriv contains the derivative w.r.t to the inputs.
    # Send it down each incoming edge and update parameters on the edge.
    for edge in layer.incoming_edge:
        if edge.conv or edge.local:
            AccumulateConvDeriv(edge.node1, edge, layer.deriv)
        else:
            self.AccumulateDeriv(edge.node1, edge, layer.deriv)
        self.UpdateEdgeParams(edge, layer.deriv, step)
    # $$ Update weight into the original bias vector here
    # Update the parameters on this layer (i.e., the bias).
    self.UpdateLayerParams(layer, step)
    # $$ Update small bias into the original weight matrix here
    return loss
def AccumulateDeriv(self, layer, edge, deriv):
    """Accumulate the derivative w.r.t the outputs of this layer.

    A layer needs to compute derivatives w.r.t its outputs. These outputs may
    have been connected to lots of other nodes through outgoing edges.
    This method adds up the derivatives contributed by each outgoing edge.
    It gets derivatives w.r.t the inputs at the other end of its outgoing edge.
    Args:
      layer: Layer receiving the derivative.
      edge: The edge which is sending the derivative.
      deriv: The derivative w.r.t the inputs at the other end of this edge.
    """
    if layer.is_input or edge.proto.block_gradient:
        return
    if layer.dirty:  # If some derivatives have already been received.
        layer.deriv.add_dot(edge.params['weight'], deriv)
    else:  # Receiving derivative for the first time.
        cm.dot(edge.params['weight'], deriv, target=layer.deriv)
        layer.dirty = True
def UpdateEdgeParams(self, edge, deriv, step):
    """ Update the parameters associated with this edge.

    Update the weights and associated parameters.
    Args:
      edge: Edge whose gradient is being accumulated.
      deriv: Gradient w.r.t the inputs at the outgoing end.
      step: Training step.
    """
    numcases = edge.node1.batchsize
    if edge.conv or edge.local:
        ConvOuter(edge, edge.temp)
        edge.gradient.add_mult(edge.temp, mult=1.0 / numcases)
    else:
        # Outer product of input state and back-propagated derivative,
        # averaged over the mini-batch.
        edge.gradient.add_dot(edge.node1.state, deriv.T, mult=1.0 / numcases)
    if edge.tied_to:
        # Shared weights: forward this gradient to the owning edge.
        edge.tied_to.gradient.add(edge.gradient)
        edge.gradient.assign(0)
        edge = edge.tied_to
    edge.num_grads_received += 1
    # Apply the update only after all sharing edges have contributed.
    if edge.num_grads_received == edge.num_shares:
        edge.Update('weight', step)
def UpdateLayerParams(self, layer, step):
    """ Update the parameters associated with this layer.

    Update the bias.
    Args:
      layer: Layer whose bias gradient is accumulated.
      step: Training step.
    """
    # Row-wise sum of the derivative, averaged over the mini-batch.
    layer.gradient.add_sums(layer.deriv, axis=1, mult=1.0 / layer.batchsize)
    if layer.tied_to:
        # Shared bias: forward this gradient to the owning layer.
        layer.tied_to.gradient.add(layer.gradient)
        layer.gradient.assign(0)
        layer = layer.tied_to
    layer.num_grads_received += 1
    # Apply the update only after all sharing layers have contributed.
    if layer.num_grads_received == layer.num_shares:
        layer.Update('bias', step, no_reg=True)  # By default, do not regularize bias.
def ForwardPropagate(self, train=False, step=0):
    """Do a forward pass through the network in topological order.

    Args:
      train: True if the forward pass is done during training, False during
        evaluation.
      step: Training step.
    Returns:
      List of the non-empty per-layer stats produced along the way.
    """
    collected = []
    for node in self.node_list:
        node_perf = self.ComputeUp(node, train, step, self.train_stop_steps)
        if node_perf:
            collected.append(node_perf)
    return collected
def BackwardPropagate(self, step):
    """Backprop through the network in reverse topological order.

    Args:
      step: Training step.
    Returns:
      List of the non-empty losses produced by output layers.
    """
    collected = []
    for node in reversed(self.node_list):
        node_loss = self.ComputeDown(node, step)
        if node_loss:
            collected.append(node_loss)
    return collected
def TrainOneBatch(self, step):
    """Train once on one mini-batch.

    Args:
      step: Training step.
    Returns:
      List of losses incurred at each output layer.
    """
    forward_losses = self.ForwardPropagate(train=True)
    backward_losses = self.BackwardPropagate(step)
    return forward_losses + backward_losses
def EvaluateOneBatch(self):
    """Forward-propagate one mini-batch and collect each output layer's loss."""
    losses = self.ForwardPropagate()
    for node in self.output_datalayer:
        losses.append(node.GetLoss())
    return losses
def Evaluate(self, validation=True, collect_predictions=False):
    """Evaluate the model.

    Args:
      validation: If True, evaluate on the validation set,
        else evaluate on test set.
      collect_predictions: If True, collect the predictions.
    Returns:
      The last accumulated stats object, or None when the relevant data
      handler is missing or the stop condition is already met.
    """
    step = 0
    stats = []
    # Select the handler, stop condition and reporting prefix for the split.
    if validation:
        stopcondition = self.ValidationStopCondition
        stop = stopcondition(step)
        if stop or self.validation_data_handler is None:
            return
        datagetter = self.GetValidationBatch
        prefix = 'V'
        stats_list = self.net.validation_stats
        num_batches = self.validation_data_handler.num_batches
    else:
        stopcondition = self.TestStopCondition
        stop = stopcondition(step)
        if stop or self.test_data_handler is None:
            return
        datagetter = self.GetTestBatch
        prefix = 'E'
        stats_list = self.net.test_stats
        num_batches = self.test_data_handler.num_batches
    if collect_predictions:
        # Pre-allocate room for every batch's predictions and targets.
        output_layer = self.output_datalayer[0]
        collect_pos = 0
        batchsize = output_layer.batchsize
        numdims = output_layer.state.shape[0]
        predictions = np.zeros((batchsize * num_batches, numdims))
        targets = np.zeros(predictions.shape)
    while not stop:
        datagetter()
        losses = self.EvaluateOneBatch()
        if collect_predictions:
            predictions[collect_pos:collect_pos + batchsize] = \
                output_layer.state.asarray().T
            targets[collect_pos:collect_pos + batchsize] = \
                output_layer.data.asarray().T
            collect_pos += batchsize
        # Fold this batch's losses into the running stats.
        if stats:
            for loss, acc in zip(losses, stats):
                Accumulate(acc, loss)
        else:
            stats = losses
        step += 1
        stop = stopcondition(step)
    if collect_predictions and stats:
        predictions = predictions[:collect_pos]
        targets = targets[:collect_pos]
        MAP, prec50, MAP_list, prec50_list = self.ComputeScore(predictions, targets)
        stat = stats[0]
        stat.MAP = MAP
        stat.prec50 = prec50
        for m in MAP_list:
            stat.MAP_list.extend([m])
        for m in prec50_list:
            stat.prec50_list.extend([m])
    for stat in stats:
        sys.stdout.write(GetPerformanceStats(stat, prefix=prefix))
    stats_list.extend(stats)
    # NOTE(review): `stat` is unbound if no batch ran (stats empty) -- the
    # early returns above appear to prevent that; confirm.
    return stat
def ScoreOneLabel(self, preds, targets):
    """Computes Average precision and precision at 50.

    Args:
      preds: model scores for one label.
      targets: ground-truth indicators for the same label.
    Returns:
      (average_precision, precision_at_50)
    """
    # Order the targets by descending prediction score.
    targets_sorted = targets[(-preds.T).argsort().flatten(), :]
    cumsum = targets_sorted.cumsum()
    # Precision after each of the top-k predictions.
    prec = cumsum / np.arange(1.0, 1 + targets.shape[0])
    total_pos = float(sum(targets))
    if total_pos == 0:
        total_pos = 1e-10  # avoid division by zero when no positives exist
    recall = cumsum / total_pos
    ap = np.dot(prec, targets_sorted) / total_pos
    # NOTE(review): assumes at least 51 examples -- prec[50] raises
    # IndexError otherwise; confirm callers always pass enough rows.
    prec50 = prec[50]
    return ap, prec50
def ComputeScore(self, preds, targets):
    """Computes Average precision and precision at 50.

    Scores each label column independently and averages over all columns.
    Returns (mean_ap, mean_prec50, per_label_ap, per_label_prec50).
    """
    assert preds.shape == targets.shape
    num_labels = preds.shape[1]
    ap_list = []
    prec_list = []
    for col in range(num_labels):
        col_ap, col_prec = self.ScoreOneLabel(preds[:, col], targets[:, col])
        ap_list.append(col_ap)
        prec_list.append(col_prec)
    mean_ap = sum(ap_list) / num_labels
    mean_prec = sum(prec_list) / num_labels
    return mean_ap, mean_prec, ap_list, prec_list
def WriteRepresentationToDisk(self, layernames, output_dir, memory='1G', dataset='test', drop=False):
    """Forward-propagate a whole dataset and dump the named layers' states.

    Args:
      layernames: names of the layers whose states should be written.
      output_dir: destination directory for the DataWriter.
      memory: memory budget string passed through to DataWriter.
      dataset: one of 'train', 'validation', 'test'.
      drop: passed as `train` to ForwardPropagate (keeps dropout active).
    Returns:
      The DataWriter's commit result, or None if the handler is missing.
    """
    layers = [self.GetLayerByName(lname) for lname in layernames]
    numdim_list = [layer.state.shape[0] for layer in layers]
    if dataset == 'train':
        datagetter = self.GetTrainBatch
        if self.train_data_handler is None:
            return
        numbatches = self.train_data_handler.num_batches
        size = numbatches * self.train_data_handler.batchsize
    elif dataset == 'validation':
        datagetter = self.GetValidationBatch
        if self.validation_data_handler is None:
            return
        numbatches = self.validation_data_handler.num_batches
        size = numbatches * self.validation_data_handler.batchsize
    elif dataset == 'test':
        datagetter = self.GetTestBatch
        if self.test_data_handler is None:
            return
        numbatches = self.test_data_handler.num_batches
        size = numbatches * self.test_data_handler.batchsize
    # NOTE(review): an unrecognized `dataset` falls through and raises
    # NameError on `numbatches` below -- confirm callers only pass the
    # three supported names.
    datawriter = DataWriter(layernames, output_dir, memory, numdim_list, size)
    for batch in range(numbatches):
        datagetter()
        sys.stdout.write('\r%d' % (batch + 1))
        sys.stdout.flush()
        self.ForwardPropagate(train=drop)
        reprs = [l.state.asarray().T for l in layers]
        datawriter.Submit(reprs)
    sys.stdout.write('\n')
    return datawriter.Commit()
def TrainStopCondition(self, step):
    """True once `step` reaches the configured number of training steps."""
    return step >= self.train_stop_steps

def ValidationStopCondition(self, step):
    """True once `step` reaches the configured number of validation steps."""
    return step >= self.validation_stop_steps

def TestStopCondition(self, step):
    """True once `step` reaches the configured number of test steps."""
    return step >= self.test_stop_steps

def EvalNow(self, step):
    """True on steps where evaluation should run.

    NOTE(review): raises ZeroDivisionError when eval_now_steps is 0
    (ShowNow guards against this; EvalNow/SaveNow do not) -- confirm
    configs always set it.
    """
    return step % self.eval_now_steps == 0

def SaveNow(self, step):
    """True on steps where a checkpoint should be written."""
    return step % self.save_now_steps == 0

def ShowNow(self, step):
    """True on steps where visualization should refresh (if enabled)."""
    return self.show_now_steps > 0 and step % self.show_now_steps == 0
def GetLayerByName(self, layername, down=False):
    """Return the layer named `layername`, or None when absent.

    `down` is accepted for interface compatibility and unused here.
    """
    for candidate in self.layer:
        if candidate.name == layername:
            return candidate
    return None
def CopyModelToCPU(self):
    """Pull every layer's and edge's parameters back into host memory."""
    for component in self.layer + self.edge:
        component.SaveParameters()
def ResetBatchsize(self, batchsize):
    """Re-allocate all batchsize-dependent buffers for a new batch size."""
    self.batchsize = batchsize
    for lyr in self.layer:
        lyr.AllocateBatchsizeDependentMemory(batchsize)
    for edg in self.edge:
        edg.AllocateBatchsizeDependentMemory()
def GetBatch(self, handler=None):
    """Fetch the next mini-batch and push it into the data layers.

    Shrinks the batchsize-dependent buffers when the handler returns a
    short (final) batch; tied layers then reuse their source layer's data.
    """
    if handler:
        data_list = handler.Get()
        if data_list[0].shape[1] != self.batchsize:
            self.ResetBatchsize(data_list[0].shape[1])
        # Assumes handler output order matches self.datalayer order.
        for i, layer in enumerate(self.datalayer):
            layer.SetData(data_list[i])
    for layer in self.tied_datalayer:
        data = layer.data_tied_to.data
        if data.shape[1] != self.batchsize:
            self.ResetBatchsize(data.shape[1])
        layer.SetData(data)
def GetTrainBatch(self):
    """Load the next training mini-batch into the data layers."""
    self.GetBatch(self.train_data_handler)

def GetValidationBatch(self):
    """Load the next validation mini-batch into the data layers."""
    self.GetBatch(self.validation_data_handler)

def GetTestBatch(self):
    """Load the next test mini-batch into the data layers."""
    self.GetBatch(self.test_data_handler)
def SetUpData(self, skip_outputs=False, skip_layernames=None):
    """Setup the data.

    Wires every input/output layer either to a fresh data handler or to the
    layer it is tied to, then stores the train/validation/test handlers.

    Args:
      skip_outputs: if True, do not wire up output layers.
      skip_layernames: optional collection of layer names to leave unwired.
    """
    # BUG FIX: the default used to be a shared mutable list ([]). It is only
    # read here, but the None-sentinel idiom removes the hazard outright.
    if skip_layernames is None:
        skip_layernames = []
    hyp_list = []
    name_list = [[], [], []]  # data names per split: [train, validation, test]
    for node in self.layer:
        if not (node.is_input or node.is_output):
            continue
        if skip_outputs and node.is_output:
            continue
        if node.name in skip_layernames:
            continue
        data_field = node.proto.data_field
        if data_field.tied:
            # This layer reuses another data layer's batches.
            self.tied_datalayer.append(node)
            node.data_tied_to = next(l for l in self.datalayer \
                                     if l.name == data_field.tied_to)
        else:
            self.datalayer.append(node)
            hyp_list.append(node.hyperparams)
            if data_field.train:
                name_list[0].append(data_field.train)
            if data_field.validation:
                name_list[1].append(data_field.validation)
            if data_field.test:
                name_list[2].append(data_field.test)
    if self.t_op:
        op = self.t_op
    else:
        op = self.e_op
    handles = GetDataHandles(op, name_list, hyp_list,
                             verbose=self.verbose)
    self.train_data_handler = handles[0]
    self.validation_data_handler = handles[1]
    self.test_data_handler = handles[2]
def SetUpTrainer(self):
    """Load the model, setup the data, set the stopping conditions."""
    self.LoadModelOnGPU()
    if self.verbose:
        self.PrintNetwork()
    self.SetUpData()
    # 'all_processed' means one full pass over the dataset; otherwise use
    # the explicit step counts from the operation protos.
    if self.t_op.stopcondition.all_processed:
        num_steps = self.train_data_handler.num_batches
    else:
        num_steps = self.t_op.stopcondition.steps
    self.train_stop_steps = num_steps
    if self.e_op.stopcondition.all_processed and self.validation_data_handler:
        num_steps = self.validation_data_handler.num_batches
    else:
        num_steps = self.e_op.stopcondition.steps
    self.validation_stop_steps = num_steps
    if self.e_op.stopcondition.all_processed and self.test_data_handler:
        num_steps = self.test_data_handler.num_batches
    else:
        num_steps = self.e_op.stopcondition.steps
    self.test_stop_steps = num_steps
    # Periodic-action intervals (in training steps).
    self.eval_now_steps = self.t_op.eval_after
    self.save_now_steps = self.t_op.checkpoint_after
    self.show_now_steps = self.t_op.show_after
    self.ExchangeGlobalInfo()
def Show(self):
    """Visualize the state of the layers and edges in the network."""
    for component in self.layer + self.edge:
        component.Show()
def GetRandomNum(self):
    """Get random column numbers for each layers which are using dropout.

    For every hidden layer, draws a sorted, duplicate-free random sample of
    half of the layer's units; those indices define the sub-network trained
    this step (node_list[0] is the input layer, node_list[-1] the output).
    """
    del self.randNum[:]
    for node in range(1, len(self.node_list) - 1):  # Generate random numbers for only hidden layers (sorted, no-duplication)
        # Python 2 integer division: dimensions/2 floors (== dimensions // 2).
        self.randNum.append(
            # np.array([0,2,4,6,8]))
            np.sort(np.random.choice(range(self.node_list[node].dimensions), self.node_list[node].dimensions/2, replace=False)))  # no duplication
# TODO 3. Implement column wise dropout whose result should be equal with Controlled Dropout code
"""
def ConstructSmallNet(self):
# Construct parameters(w, b) for small network with random numbers.
# weight: self.edge[i].params['weight'] (0~3): (784x1024),(1024x1024),(1024x2048),(2048x10)
# bias: self.layer[i].params['bias'] (0~4): (-),(10x1),(1024,1),(1024,1),(2048,1)
def UpdateOriginalNet(self):
# Update parameters(W, b) of small net to parameters(W, b) of original net
"""
def ConstructSmallNet(self):
    """Construct parameters(w, b) for small network with random numbers.

    Gather step of controlled dropout: copies the rows/columns of the full
    net's weights and biases selected by self.randNum into self.small_net.
    """
    # weight: self.edge[i].params['weight'] (0~3): (784x1024),(1024x1024),(1024x2048),(2048x10)
    # bias: self.layer[i].params['bias'] (0~4): (-),(10x1),(1024,1),(1024,1),(2048,1)
    # NOTE(review): `use_gpu` comes from a star-imported module -- confirm it
    # is always 'yes' or 'no'; any other value silently does nothing here.
    if use_gpu == 'yes':
        # Update weight
        temp = self.edge[0].params['weight'].asarray()
        # First edge: keep all input rows, select the sampled hidden columns.
        self.small_net.edge[0].params['weight'].overwrite(temp[..., self.randNum[0]])
        for i in range(len(self.randNum) - 1):
            temp = self.edge[i + 1].params['weight'].asarray()
            # Hidden-to-hidden edges: select sampled rows AND columns.
            self.small_net.edge[i + 1].params['weight'].overwrite(temp[self.randNum[i][:, np.newaxis], self.randNum[i + 1]])
        temp = self.edge[len(self.randNum)].params['weight'].asarray()
        # Last edge: select sampled rows, keep all output columns.
        self.small_net.edge[len(self.randNum)].params['weight'].overwrite(temp[self.randNum[-1], ...])
        # Update bias
        for i in range(len(self.randNum)):
            temp = self.layer[i + 2].params['bias'].asarray()
            self.small_net.layer[i + 2].params['bias'].overwrite(temp[self.randNum[i]])
        # Output-layer bias (layer index 1) is copied whole.
        temp = self.layer[1].params['bias'].asarray()
        self.small_net.layer[1].params['bias'].overwrite(temp)
    elif use_gpu == 'no':
        # Update weight
        self.small_net.edge[0].params['weight'] = cm.EigenMatrix(
            self.edge[0].params['weight'].numpy_array[..., self.randNum[0]])
        for i in range(len(self.randNum) - 1):  # TODO: Can be combined with only a 'for' statement
            self.small_net.edge[i + 1].params['weight'] = \
                cm.EigenMatrix(self.edge[i + 1].params['weight'].numpy_array[
                    self.randNum[i][:, np.newaxis], self.randNum[i + 1]])
        self.small_net.edge[len(self.randNum)].params['weight'] = \
            cm.EigenMatrix(self.edge[len(self.randNum)].params['weight'].numpy_array[self.randNum[-1], ...])
        # Update bias
        for i in range(len(self.randNum)):
            self.small_net.layer[i + 2].params['bias'] = cm.EigenMatrix(
                self.layer[i + 2].params['bias'].numpy_array[self.randNum[i]])
        self.small_net.layer[1].params['bias'] = cm.EigenMatrix(self.layer[1].params['bias'].numpy_array)
def UpdateOriginalNet(self):
    """Update parameters(W, b) of small net to parameters(W, b) of original net.

    Scatter step of controlled dropout: writes the trained small-net
    weights/biases back into the rows/columns of the full net selected by
    self.randNum (the inverse of ConstructSmallNet).
    """
    # NOTE(review): `use_gpu` comes from a star-imported module -- confirm it
    # is always 'yes' or 'no'; any other value silently does nothing here.
    if use_gpu == 'yes':
        # Update weight
        temp = self.edge[0].params['weight'].asarray()
        temp[..., self.randNum[0]] = self.small_net.edge[0].params['weight'].asarray()
        self.edge[0].params['weight'].overwrite(temp)
        for i in range(len(self.randNum) - 1):
            temp = self.edge[i + 1].params['weight'].asarray()
            temp[self.randNum[i][:, np.newaxis], self.randNum[i + 1]] = self.small_net.edge[i + 1].params['weight'].asarray()
            self.edge[i + 1].params['weight'].overwrite(temp)
        temp = self.edge[len(self.randNum)].params['weight'].asarray()
        temp[self.randNum[len(self.randNum) - 1], ...] = self.small_net.edge[len(self.randNum)].params['weight'].asarray()
        self.edge[len(self.randNum)].params['weight'].overwrite(temp)
        # Update bias
        for i in range(len(self.randNum)):
            temp = self.layer[i + 2].params['bias'].asarray()
            temp[self.randNum[i]] = self.small_net.layer[i + 2].params['bias'].asarray()
            self.layer[i + 2].params['bias'].overwrite(temp)
        # Output-layer bias (layer index 1) is written back whole.
        temp = self.small_net.layer[1].params['bias'].asarray()
        self.layer[1].params['bias'].overwrite(temp)
    elif use_gpu == 'no':
        # Update weight
        temp = np.copy(self.edge[0].params['weight'].numpy_array)
        temp[..., self.randNum[0]] = self.small_net.edge[0].params['weight'].numpy_array
        self.edge[0].params['weight'] = cm.EigenMatrix(temp)
        for i in range(len(self.randNum) - 1):
            temp = np.copy(self.edge[i + 1].params['weight'].numpy_array)
            temp[self.randNum[i][:, np.newaxis], self.randNum[i + 1]] = \
                self.small_net.edge[i + 1].params['weight'].numpy_array
            self.edge[i + 1].params['weight'] = cm.EigenMatrix(temp)
        temp = np.copy(self.edge[len(self.randNum)].params['weight'].numpy_array)
        temp[self.randNum[len(self.randNum) - 1], ...] = \
            self.small_net.edge[len(self.randNum)].params['weight'].numpy_array
        self.edge[len(self.randNum)].params['weight'] = cm.EigenMatrix(temp)
        # Update bias
        for i in range(len(self.randNum)):
            temp = np.copy(self.layer[i + 2].params['bias'].numpy_array)
            temp[self.randNum[i]] = self.small_net.layer[i + 2].params['bias'].numpy_array
            self.layer[i + 2].params['bias'] = cm.EigenMatrix(temp)
        self.layer[1].params['bias'] = cm.EigenMatrix(self.small_net.layer[1].params['bias'].numpy_array)
    def Train(self):
        """Train the model.

        Controlled-dropout training loop: each step trains a randomly
        selected sub-network (``self.small_net``) and writes its updated
        parameters back into the full network.  Per-eval-interval metrics
        (train/valid/test cross-entropy, accuracy, wall time, memory) are
        appended to a hard-coded CSV file.  Python 2 code (print statements).
        """
        start_time = time.time()
        assert self.t_op is not None, 't_op is None.'
        assert self.e_op is not None, 'e_op is None.'
        self.SetUpTrainer()
        self.small_net.SetUpTrainer()  # SMALL: the sub-network has its own trainer state.
        step = self.t_op.current_step
        stop = self.TrainStopCondition(step)
        stats = []
        # Only collect predictions if the output layer asks for MAP/prec@50.
        collect_predictions = False
        try:
            p = self.output_datalayer[0].proto.performance_stats
            if p.compute_MAP or p.compute_prec50:
                collect_predictions = True
        except Exception as e:
            # Output layer may not define performance_stats; fall back to False.
            pass
        # Model-selection criterion flags (mutually exclusive in practice).
        select_model_using_error = self.net.hyperparams.select_model_using_error
        select_model_using_acc = self.net.hyperparams.select_model_using_acc
        select_model_using_map = self.net.hyperparams.select_model_using_map
        select_best = select_model_using_error or select_model_using_acc or select_model_using_map
        if select_best:
            best_valid_error = float('Inf')
            test_error = float('Inf')
            best_net = self.DeepCopy()
            dump_best = False
        # NOTE(review): absolute, machine-specific CSV path — breaks on any
        # other host; consider making it configurable.
        with open('/home/hpc/github/ControlledDropout/deepnet/examples/csv/mem_test.csv', 'w') as csvfile:
            fieldnames = ['Step', 'T_CE', 'T_Acc', 'T_Res', 'V_CE', 'V_Acc', 'V_Res', 'E_CE', 'E_Acc', 'E_Res', 'Time', 'Mem']
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            while not stop:
                sys.stdout.write('\rTrain Step: %d' % step)
                sys.stdout.flush()
                # 0. Get training batch
                # self.GetTrainBatch()  # For original net
                self.small_net.GetTrainBatch()  # SMALL) for small net
                # 1. Get random numbers (which units participate this step)
                self.GetRandomNum()
                # 2. Construct parameters(w, b) for small network
                self.ConstructSmallNet()
                # 3. Train the batch
                # losses = self.TrainOneBatch(step)  # for original net
                losses = self.small_net.TrainOneBatch(step)  # SMALL) for small net
                # 4. Update the parameters(W, b) of original network from small network
                self.UpdateOriginalNet()
                # 5. Save the training accuracy
                if stats:  # Save the training accuracy
                    for acc, loss in zip(stats, losses):
                        Accumulate(acc, loss)
                else:
                    stats = losses
                step += 1
                # if self.ShowNow(step):
                #     self.Show()
                if self.EvalNow(step):
                    # Print out training stats.
                    sys.stdout.write('\rStep %d ' % step)
                    mem_usage = memory_usage(proc=-1, interval=.1, timeout=None)
                    sys.stdout.write('Mem %dMB' % mem_usage[0])
                    for stat in stats:
                        sys.stdout.write(GetPerformanceStats(stat, prefix='T'))
                    self.net.train_stats.extend(stats)
                    stats = []
                    # Evaluate on validation set.
                    val = self.Evaluate(validation=True, collect_predictions=collect_predictions)
                    # Evaluate on test set.
                    tes = self.Evaluate(validation=False, collect_predictions=collect_predictions)
                    # Write on csv file.
                    # NOTE(review): `stat` below is the leaked loop variable
                    # from the `for stat in stats` loop above, i.e. the LAST
                    # accumulated training stat; if stats were empty this would
                    # raise NameError — TODO confirm intended.
                    writer.writerow({'Step': step,
                                     'T_CE': stat.cross_entropy / stat.count,
                                     'T_Acc': stat.correct_preds / stat.count,
                                     'T_Res': stat.correct_preds,
                                     'V_CE': val.cross_entropy / val.count,
                                     'V_Acc': val.correct_preds / val.count,
                                     'V_Res': val.correct_preds,
                                     'E_CE': tes.cross_entropy / tes.count,
                                     'E_Acc': tes.correct_preds / tes.count,
                                     'E_Res': tes.correct_preds,
                                     'Time': time.time() - start_time,
                                     'Mem' : mem_usage[0]
                                     })
                    if select_best:
                        valid_stat = self.net.validation_stats[-1]
                        if len(self.net.test_stats) > 1:
                            test_stat = self.net.test_stats[-1]
                        else:
                            test_stat = valid_stat
                        # Convert the chosen metric into an "error" to minimize.
                        if select_model_using_error:
                            valid_error = valid_stat.error / valid_stat.count
                            _test_error = test_stat.error / test_stat.count
                        elif select_model_using_acc:
                            valid_error = 1 - float(valid_stat.correct_preds) / valid_stat.count
                            _test_error = 1 - float(test_stat.correct_preds) / test_stat.count
                        elif select_model_using_map:
                            valid_error = 1 - valid_stat.MAP
                            _test_error = 1 - test_stat.MAP
                        if valid_error < best_valid_error:
                            # New best model: snapshot it for the next SaveNow dump.
                            best_valid_error = valid_error
                            test_error = _test_error
                            dump_best = True
                            self.CopyModelToCPU()
                            self.t_op.current_step = step
                            self.net.best_valid_stat.CopyFrom(valid_stat)
                            self.net.train_stat_es.CopyFrom(self.net.train_stats[-1])
                            self.net.test_stat_es.CopyFrom(test_stat)
                            best_net = self.DeepCopy()
                            best_t_op = CopyOperation(self.t_op)
                    # for e in self.edge:
                    #     sys.stdout.write(' %s %.3f' % (e.name, e.params['weight'].euclid_norm()))
                    sys.stdout.write('\n')
                if self.SaveNow(step):
                    # Periodic checkpoint of the current model.
                    self.t_op.current_step = step
                    self.CopyModelToCPU()
                    util.WriteCheckpointFile(self.net, self.t_op)
                    if dump_best:
                        # Also dump the best-so-far model once per improvement.
                        dump_best = False
                        if select_model_using_error:
                            print 'Best valid error : %.4f Test error %.4f' % (best_valid_error, test_error)
                        elif select_model_using_acc:
                            print 'Best valid acc : %.4f Test acc %.4f' % (1 - best_valid_error, 1 - test_error)
                        elif select_model_using_map:
                            print 'Best valid MAP : %.4f Test MAP %.4f' % (1 - best_valid_error, 1 - test_error)
                        util.WriteCheckpointFile(best_net, best_t_op, best=True)
                stop = self.TrainStopCondition(step)
| {
"content_hash": "687dc640297106bfb4fe5063a1d765e9",
"timestamp": "",
"source": "github",
"line_count": 874,
"max_line_length": 148,
"avg_line_length": 42.504576659038904,
"alnum_prop": 0.5497052410562868,
"repo_name": "kobiso/ControlledDropout",
"id": "f46aa01da950fc01d97c02fafa494e1fe419c4a1",
"size": "37149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepnet/ff_cd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "853"
},
{
"name": "C++",
"bytes": "55929"
},
{
"name": "Cuda",
"bytes": "365893"
},
{
"name": "Makefile",
"bytes": "1640"
},
{
"name": "Protocol Buffer",
"bytes": "10826"
},
{
"name": "Python",
"bytes": "436774"
},
{
"name": "Shell",
"bytes": "8392"
}
],
"symlink_target": ""
} |
from .list import List
from .array import Array
from .tree import Tree
from .graph import Graph
from .cartesian import Cartesian | {
"content_hash": "b6d3f205f700e58dfafb0586a202b997",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 32,
"avg_line_length": 25.6,
"alnum_prop": 0.8125,
"repo_name": "alviproject/alvi",
"id": "20c022c352756f0a00441e159192c1cc049ee02f",
"size": "128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alvi/containers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42568"
},
{
"name": "HTML",
"bytes": "35975"
},
{
"name": "JavaScript",
"bytes": "152425"
},
{
"name": "Python",
"bytes": "108114"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
} |
import re

import pytest
from pytest_embedded import Dut


@pytest.mark.esp32
@pytest.mark.esp32c3
def test_partition_mmap_example(dut: Dut) -> None:
    """Verify the partition-mmap example emits every expected log line, in order."""
    # ESP_ERROR_CHECK or assert will cause abort on error and "Example end" won't be received
    expected_patterns = [
        rb'Written sample data to partition: ESP-IDF Partition Memory Map Example',
        rb'Mapped partition to data memory address \S+',
        rb'Read sample data from partition using mapped memory: ESP-IDF Partition Memory Map Example',
        rb'Data matches',
        rb'Unmapped partition from data memory',
        rb'Example end',
    ]
    for pattern in expected_patterns:
        dut.expect(re.compile(pattern), timeout=20)
| {
"content_hash": "69321af671977e46fbe7e8891b8ec62d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 114,
"avg_line_length": 38.8421052631579,
"alnum_prop": 0.6476964769647696,
"repo_name": "espressif/esp-idf",
"id": "e77d29fe5bc72cf12e33a289277d58ae9ef3e349",
"size": "854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/storage/partition_api/partition_mmap/pytest_partition_mmap_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "388440"
},
{
"name": "Batchfile",
"bytes": "5451"
},
{
"name": "C",
"bytes": "69102322"
},
{
"name": "C++",
"bytes": "992772"
},
{
"name": "CMake",
"bytes": "539972"
},
{
"name": "Dockerfile",
"bytes": "3290"
},
{
"name": "Makefile",
"bytes": "23747"
},
{
"name": "Nim",
"bytes": "1005"
},
{
"name": "PowerShell",
"bytes": "4537"
},
{
"name": "Python",
"bytes": "2158180"
},
{
"name": "Roff",
"bytes": "101"
},
{
"name": "Shell",
"bytes": "126143"
}
],
"symlink_target": ""
} |
from mox import IsA # noqa
from django.core.urlresolvers import reverse # noqa
from django.core.urlresolvers import reverse_lazy # noqa
from django import http
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.api import lbaas
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.loadbalancers import workflows
class LoadBalancerTests(test.TestCase):
    """Tests for the project dashboard's LBaaS panel: index tabs and the
    pool/VIP/member/monitor workflows, using mox-stubbed api.lbaas calls.
    """

    class AttributeDict(dict):
        # Minimal dict that also exposes its keys as attributes; used below to
        # fake Nova server and Neutron port objects.
        def __getattr__(self, attr):
            return self[attr]

        def __setattr__(self, attr, value):
            self[attr] = value

    # URL names of every loadbalancer view under the project dashboard.
    DASHBOARD = 'project'
    INDEX_URL = reverse_lazy('horizon:%s:loadbalancers:index' % DASHBOARD)

    ADDPOOL_PATH = 'horizon:%s:loadbalancers:addpool' % DASHBOARD
    ADDVIP_PATH = 'horizon:%s:loadbalancers:addvip' % DASHBOARD
    ADDMEMBER_PATH = 'horizon:%s:loadbalancers:addmember' % DASHBOARD
    ADDMONITOR_PATH = 'horizon:%s:loadbalancers:addmonitor' % DASHBOARD

    POOL_DETAIL_PATH = 'horizon:%s:loadbalancers:pooldetails' % DASHBOARD
    VIP_DETAIL_PATH = 'horizon:%s:loadbalancers:vipdetails' % DASHBOARD
    MEMBER_DETAIL_PATH = 'horizon:%s:loadbalancers:memberdetails' % DASHBOARD
    MONITOR_DETAIL_PATH = 'horizon:%s:loadbalancers:monitordetails' % DASHBOARD

    UPDATEPOOL_PATH = 'horizon:%s:loadbalancers:updatepool' % DASHBOARD
    UPDATEVIP_PATH = 'horizon:%s:loadbalancers:updatevip' % DASHBOARD
    UPDATEMEMBER_PATH = 'horizon:%s:loadbalancers:updatemember' % DASHBOARD
    UPDATEMONITOR_PATH = 'horizon:%s:loadbalancers:updatemonitor' % DASHBOARD

    ADDASSOC_PATH = 'horizon:%s:loadbalancers:addassociation' % DASHBOARD
    DELETEASSOC_PATH = 'horizon:%s:loadbalancers:deleteassociation' % DASHBOARD
    def set_up_expect(self):
        """Record the mox expectations shared by the happy-path index tests.

        Records (in replay order) the pool, member and health-monitor listing
        calls that rendering the index page triggers.
        """
        # retrieve pools
        vip1 = self.vips.first()
        vip2 = self.vips.list()[1]
        api.lbaas.pools_get(
            IsA(http.HttpRequest)).AndReturn(self.pools.list())
        api.lbaas.vip_get(IsA(http.HttpRequest), vip1.id).AndReturn(vip1)
        api.lbaas.vip_get(IsA(http.HttpRequest), vip2.id).AndReturn(vip2)
        # retrieves members
        api.lbaas.members_get(
            IsA(http.HttpRequest)).AndReturn(self.members.list())
        pool1 = self.pools.first()
        pool2 = self.pools.list()[1]
        api.lbaas.pool_get(IsA(http.HttpRequest),
                           self.members.list()[0].pool_id).AndReturn(pool1)
        api.lbaas.pool_get(IsA(http.HttpRequest),
                           self.members.list()[1].pool_id).AndReturn(pool2)
        # retrieves monitors (called once per tab, hence MultipleTimes)
        api.lbaas.pool_health_monitors_get(
            IsA(http.HttpRequest)).MultipleTimes() \
            .AndReturn(self.monitors.list())
    def set_up_expect_with_exception(self):
        """Record expectations where every lbaas listing call raises."""
        api.lbaas.pools_get(
            IsA(http.HttpRequest)).AndRaise(self.exceptions.neutron)
        api.lbaas.members_get(
            IsA(http.HttpRequest)).AndRaise(self.exceptions.neutron)
        api.lbaas.pool_health_monitors_get(
            IsA(http.HttpRequest)).AndRaise(self.exceptions.neutron)
    @test.create_stubs({api.lbaas: ('pools_get', 'vip_get',
                                    'members_get', 'pool_get',
                                    'pool_health_monitors_get'),
                        api.neutron: ('subnet_get',)})
    def test_index_pools(self):
        """Pools tab lists every pool returned by the lbaas API."""
        self.set_up_expect()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL)

        self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['table'].data),
                         len(self.pools.list()))
    @test.create_stubs({api.lbaas: ('pools_get', 'vip_get',
                                    'members_get', 'pool_get',
                                    'pool_health_monitors_get'),
                        api.neutron: ('subnet_get',)})
    def test_index_members(self):
        """Members tab lists every member returned by the lbaas API."""
        self.set_up_expect()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__members')

        self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['memberstable_table'].data),
                         len(self.members.list()))
    @test.create_stubs({api.lbaas: ('pools_get', 'vip_get',
                                    'pool_health_monitors_get',
                                    'members_get', 'pool_get'),
                        api.neutron: ('subnet_get',)})
    def test_index_monitors(self):
        """Monitors tab lists every health monitor returned by the API."""
        self.set_up_expect()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__monitors')

        self.assertTemplateUsed(res, '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res, 'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['monitorstable_table'].data),
                         len(self.monitors.list()))
    @test.create_stubs({api.lbaas: ('pools_get', 'members_get',
                                    'pool_health_monitors_get')})
    def test_index_exception_pools(self):
        """Pools tab renders an empty table when the API raises."""
        self.set_up_expect_with_exception()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL)

        self.assertTemplateUsed(res,
                                '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res,
                                'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['table'].data), 0)
    @test.create_stubs({api.lbaas: ('pools_get', 'members_get',
                                    'pool_health_monitors_get')})
    def test_index_exception_members(self):
        """Members tab renders an empty table when the API raises."""
        self.set_up_expect_with_exception()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__members')

        self.assertTemplateUsed(res,
                                '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res,
                                'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['memberstable_table'].data), 0)
    @test.create_stubs({api.lbaas: ('pools_get', 'members_get',
                                    'pool_health_monitors_get')})
    def test_index_exception_monitors(self):
        """Monitors tab renders an empty table when the API raises."""
        self.set_up_expect_with_exception()
        self.mox.ReplayAll()

        res = self.client.get(self.INDEX_URL + '?tab=lbtabs__monitors')

        self.assertTemplateUsed(res,
                                '%s/loadbalancers/details_tabs.html'
                                % self.DASHBOARD)
        self.assertTemplateUsed(res,
                                'horizon/common/_detail_table.html')
        self.assertEqual(len(res.context['monitorstable_table'].data), 0)
    @test.create_stubs({api.neutron: ('network_list_for_tenant',
                                      'provider_list',
                                      'is_extension_supported'),
                        api.lbaas: ('pool_create', )})
    def test_add_pool_post(self):
        """POSTing the add-pool form calls pool_create and redirects to index."""
        pool = self.pools.first()
        subnet = self.subnets.first()
        networks = [{'subnets': [subnet, ]}, ]

        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'service-type').AndReturn(True)
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest), subnet.tenant_id).AndReturn(networks)
        api.neutron.provider_list(IsA(http.HttpRequest)) \
            .AndReturn(self.providers.list())
        api.lbaas.pool_create(
            IsA(http.HttpRequest),
            name=pool.name,
            description=pool.description,
            subnet_id=pool.subnet_id,
            protocol=pool.protocol,
            lb_method=pool.lb_method,
            admin_state_up=pool.admin_state_up,
            provider=pool.provider).AndReturn(pool)
        self.mox.ReplayAll()

        form_data = {'name': pool.name,
                     'description': pool.description,
                     'subnet_id': pool.subnet_id,
                     'protocol': pool.protocol,
                     'lb_method': pool.lb_method,
                     'admin_state_up': pool.admin_state_up}

        res = self.client.post(reverse(self.ADDPOOL_PATH), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.neutron: ('network_list_for_tenant',
                                      'provider_list',
                                      'is_extension_supported')})
    def test_add_pool_get(self):
        """Add-pool workflow renders with the provider list available."""
        self._test_add_pool_get(with_service_type=True)
@test.create_stubs({api.neutron: ('network_list_for_tenant',
'provider_list',
'is_extension_supported')})
def test_add_pool_get_provider_list_exception(self):
self._test_add_pool_get(with_service_type=True)
    @test.create_stubs({api.neutron: ('network_list_for_tenant',
                                      'is_extension_supported')})
    def test_add_pool_get_without_service_type_support(self):
        """Add-pool workflow renders when the service-type extension is absent."""
        self._test_add_pool_get(with_service_type=False)
    def _test_add_pool_get(self, with_service_type=True,
                           with_provider_exception=False):
        """Shared driver for the add-pool GET tests.

        :param with_service_type: whether the 'service-type' extension is
            reported as supported (enables the provider_list call).
        :param with_provider_exception: when True (and with_service_type is
            True), provider_list raises and the form must show
            'No provider is available.'.
        """
        subnet = self.subnets.first()
        default_provider = self.providers.first()['name']
        networks = [{'subnets': [subnet, ]}, ]

        api.neutron.is_extension_supported(
            IsA(http.HttpRequest), 'service-type').AndReturn(with_service_type)
        api.neutron.network_list_for_tenant(
            IsA(http.HttpRequest), subnet.tenant_id).AndReturn(networks)
        if with_service_type:
            prov_list = api.neutron.provider_list(IsA(http.HttpRequest))
            if with_provider_exception:
                prov_list.AndRaise(self.exceptions.neutron)
            else:
                prov_list.AndReturn(self.providers.list())
        self.mox.ReplayAll()

        res = self.client.get(reverse(self.ADDPOOL_PATH))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddPool.name)
        expected_objs = ['<AddPoolStep: addpoolaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
        if not with_service_type:
            self.assertNotContains(res, default_provider)
            self.assertContains(res, ('Provider for Load Balancer '
                                      'is not supported.'))
        elif with_provider_exception:
            self.assertNotContains(res, default_provider)
            self.assertContains(res, 'No provider is available.')
        else:
            self.assertContains(res, default_provider)
    def test_add_vip_post(self):
        """Add-VIP POST with a connection limit supplied."""
        self._test_add_vip_post()
    def test_add_vip_post_no_connection_limit(self):
        """Add-VIP POST without a connection limit."""
        self._test_add_vip_post(with_conn_limit=False)
    @test.create_stubs({api.lbaas: ('pool_get', 'vip_create'),
                        api.neutron: ('subnet_get', )})
    def _test_add_vip_post(self, with_conn_limit=True):
        """Shared driver for the add-VIP POST tests.

        :param with_conn_limit: whether the optional connection_limit field is
            included in both the expected vip_create kwargs and the form data.
        """
        vip = self.vips.first()
        subnet = self.subnets.first()
        pool = self.pools.first()

        api.lbaas.pool_get(
            IsA(http.HttpRequest), pool.id).MultipleTimes().AndReturn(pool)
        api.neutron.subnet_get(
            IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
        params = {'name': vip.name,
                  'description': vip.description,
                  'pool_id': vip.pool_id,
                  'address': vip.address,
                  'floatip_address': vip.floatip_address,
                  'other_address': vip.other_address,
                  'subnet': vip.subnet,
                  'subnet_id': vip.subnet_id,
                  'protocol_port': vip.protocol_port,
                  'protocol': vip.protocol,
                  'session_persistence': vip.session_persistence['type'],
                  'cookie_name': vip.session_persistence['cookie_name'],
                  'admin_state_up': vip.admin_state_up,
                  }
        if with_conn_limit:
            params['connection_limit'] = vip.connection_limit
        api.lbaas.vip_create(
            IsA(http.HttpRequest), **params).AndReturn(vip)
        self.mox.ReplayAll()

        form_data = {'name': vip.name,
                     'description': vip.description,
                     'pool_id': vip.pool_id,
                     'address': vip.address,
                     'floatip_address': vip.floatip_address,
                     'other_address': vip.other_address,
                     'subnet_id': vip.subnet_id,
                     'subnet': vip.subnet,
                     'protocol_port': vip.protocol_port,
                     'protocol': vip.protocol,
                     'session_persistence': vip.session_persistence['type'],
                     'cookie_name': vip.session_persistence['cookie_name'],
                     'admin_state_up': vip.admin_state_up}
        if with_conn_limit:
            form_data['connection_limit'] = vip.connection_limit

        res = self.client.post(
            reverse(self.ADDVIP_PATH, args=(pool.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_get', ),
                        api.neutron: ('subnet_get', )})
    def test_add_vip_post_with_error(self):
        """Out-of-range protocol_port (65536) and connection_limit (-2)
        produce exactly two form errors and no vip_create call."""
        vip = self.vips.first()
        subnet = self.subnets.first()
        pool = self.pools.first()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.neutron.subnet_get(
            IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
        self.mox.ReplayAll()

        form_data = {'name': vip.name,
                     'description': vip.description,
                     'pool_id': vip.pool_id,
                     'address': vip.address,
                     'subnet_id': vip.subnet_id,
                     'protocol_port': 65536,
                     'protocol': vip.protocol,
                     'session_persistence': vip.session_persistence['type'],
                     'cookie_name': vip.session_persistence['cookie_name'],
                     'connection_limit': -2,
                     'admin_state_up': vip.admin_state_up}

        res = self.client.post(
            reverse(self.ADDVIP_PATH, args=(pool.id,)), form_data)

        self.assertFormErrors(res, 2)
    @test.create_stubs({api.lbaas: ('pool_get', ),
                        api.neutron: ('subnet_get', )})
    def test_add_vip_get(self):
        """GET renders the AddVip workflow with its single step."""
        subnet = self.subnets.first()
        pool = self.pools.first()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.neutron.subnet_get(
            IsA(http.HttpRequest), subnet.id).AndReturn(subnet)
        self.mox.ReplayAll()

        res = self.client.get(reverse(self.ADDVIP_PATH, args=(pool.id,)))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddVip.name)
        expected_objs = ['<AddVipStep: addvipaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
    @test.create_stubs({api.lbaas: ('pool_health_monitor_create', )})
    def test_add_monitor_post(self):
        """POSTing the add-monitor form calls pool_health_monitor_create."""
        monitor = self.monitors.first()

        api.lbaas.pool_health_monitor_create(
            IsA(http.HttpRequest),
            type=monitor.type,
            delay=monitor.delay,
            timeout=monitor.timeout,
            max_retries=monitor.max_retries,
            http_method=monitor.http_method,
            url_path=monitor.url_path,
            expected_codes=monitor.expected_codes,
            admin_state_up=monitor.admin_state_up).AndReturn(
                lbaas.PoolMonitor(monitor))
        self.mox.ReplayAll()

        form_data = {'type': monitor.type,
                     'delay': monitor.delay,
                     'timeout': monitor.timeout,
                     'max_retries': monitor.max_retries,
                     'http_method': monitor.http_method,
                     'url_path': monitor.url_path,
                     'expected_codes': monitor.expected_codes,
                     'admin_state_up': monitor.admin_state_up}

        res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    def test_add_monitor_post_with_error(self):
        """Zero delay/timeout and max_retries > 10 yield three form errors."""
        monitor = self.monitors.first()

        form_data = {'type': monitor.type,
                     'delay': 0,
                     'timeout': 0,
                     'max_retries': 11,
                     'http_method': monitor.http_method,
                     'url_path': monitor.url_path,
                     'expected_codes': monitor.expected_codes,
                     'admin_state_up': monitor.admin_state_up}

        res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data)

        self.assertFormErrors(res, 3)
    def test_add_monitor_post_with_httpmethod_error(self):
        """HTTP-type monitor with empty method/path/codes yields three errors."""
        monitor = self.monitors.first()

        form_data = {'type': 'http',
                     'delay': monitor.delay,
                     'timeout': monitor.timeout,
                     'max_retries': monitor.max_retries,
                     'http_method': '',
                     'url_path': '',
                     'expected_codes': '',
                     'admin_state_up': monitor.admin_state_up}

        res = self.client.post(reverse(self.ADDMONITOR_PATH), form_data)

        self.assertFormErrors(res, 3)
    def test_add_monitor_get(self):
        """GET renders the AddMonitor workflow with its single step."""
        res = self.client.get(reverse(self.ADDMONITOR_PATH))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddMonitor.name)
        expected_objs = ['<AddMonitorStep: addmonitoraction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
    def test_add_member_post(self):
        """Add-member POST with an explicit weight."""
        self._test_add_member_post()
    def test_add_member_post_without_weight(self):
        """Add-member POST without a weight field."""
        self._test_add_member_post(with_weight=False)
    @test.create_stubs({api.lbaas: ('pools_get', 'member_create'),
                        api.neutron: ('port_list',),
                        api.nova: ('server_list',)})
    def _test_add_member_post(self, with_weight=True):
        """Shared driver for the add-member POST tests.

        Fakes two Nova servers and the Neutron port of the first one, then
        expects member_create to be called with that server's address.

        :param with_weight: whether the optional weight field is included in
            both the expected member_create kwargs and the form data.
        """
        member = self.members.first()

        server1 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338042e',
                                      'name': 'vm1'})
        server2 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338043e',
                                      'name': 'vm2'})
        port1 = self.AttributeDict(
            {'fixed_ips': [{'ip_address': member.address}]})

        api.lbaas.pools_get(IsA(http.HttpRequest)).AndReturn(self.pools.list())
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn(
            [[server1, server2], False])
        api.neutron.port_list(IsA(http.HttpRequest),
                              device_id=server1.id).AndReturn([port1, ])
        params = {'pool_id': member.pool_id,
                  'address': member.address,
                  'protocol_port': member.protocol_port,
                  'members': [server1.id],
                  'admin_state_up': member.admin_state_up,
                  }
        if with_weight:
            params['weight'] = member.weight
        api.lbaas.member_create(IsA(http.HttpRequest),
                                **params).AndReturn(lbaas.Member(member))
        self.mox.ReplayAll()

        form_data = {'pool_id': member.pool_id,
                     'address': member.address,
                     'protocol_port': member.protocol_port,
                     'members': [server1.id],
                     'admin_state_up': member.admin_state_up}
        if with_weight:
            form_data['weight'] = member.weight

        res = self.client.post(reverse(self.ADDMEMBER_PATH), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pools_get',),
                        api.nova: ('server_list',)})
    def test_add_member_post_with_error(self):
        """Invalid protocol_port (65536) and weight (-1) yield two form errors."""
        member = self.members.first()

        server1 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338042e',
                                      'name': 'vm1'})
        server2 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338043e',
                                      'name': 'vm2'})

        api.lbaas.pools_get(IsA(http.HttpRequest)).AndReturn(self.pools.list())
        api.nova.server_list(IsA(http.HttpRequest)).AndReturn([[server1,
                                                                server2],
                                                               False])
        self.mox.ReplayAll()

        # try to create member with invalid protocol port and weight
        form_data = {'pool_id': member.pool_id,
                     'address': member.address,
                     'protocol_port': 65536,
                     'weight': -1,
                     'members': [server1.id],
                     'admin_state_up': member.admin_state_up}

        res = self.client.post(reverse(self.ADDMEMBER_PATH), form_data)

        self.assertFormErrors(res, 2)
    @test.create_stubs({api.lbaas: ('pools_get',),
                        api.nova: ('server_list',)})
    def test_add_member_get(self):
        """GET renders the AddMember workflow with its single step."""
        server1 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338042e',
                                      'name': 'vm1'})
        server2 = self.AttributeDict({'id':
                                      '12381d38-c3eb-4fee-9763-12de3338043e',
                                      'name': 'vm2'})

        api.lbaas.pools_get(IsA(http.HttpRequest)).AndReturn(self.pools.list())
        api.nova.server_list(
            IsA(http.HttpRequest)).AndReturn([[server1, server2], False])
        self.mox.ReplayAll()

        res = self.client.get(reverse(self.ADDMEMBER_PATH))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddMember.name)
        expected_objs = ['<AddMemberStep: addmemberaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
    @test.create_stubs({api.lbaas: ('pool_get', 'pool_update')})
    def test_update_pool_post(self):
        """POSTing the update-pool form calls pool_update with the form data."""
        pool = self.pools.first()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)

        data = {'name': pool.name,
                'description': pool.description,
                'lb_method': pool.lb_method,
                'admin_state_up': pool.admin_state_up}

        api.lbaas.pool_update(IsA(http.HttpRequest), pool.id, pool=data)\
            .AndReturn(pool)
        self.mox.ReplayAll()

        form_data = data.copy()
        form_data.update({'pool_id': pool.id})

        res = self.client.post(
            reverse(self.UPDATEPOOL_PATH, args=(pool.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_get',)})
    def test_update_pool_get(self):
        """GET renders the update-pool template."""
        pool = self.pools.first()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        self.mox.ReplayAll()

        res = self.client.get(reverse(self.UPDATEPOOL_PATH, args=(pool.id,)))

        self.assertTemplateUsed(res, 'project/loadbalancers/updatepool.html')
    @test.create_stubs({api.lbaas: ('pools_get', 'vip_get',
                                    'vip_update')})
    def test_update_vip_post(self):
        """POSTing the update-VIP form calls vip_update with the form data."""
        vip = self.vips.first()

        api.lbaas.pools_get(IsA(http.HttpRequest)).AndReturn(self.pools.list())
        api.lbaas.vip_get(IsA(http.HttpRequest), vip.id).AndReturn(vip)

        data = {'name': vip.name,
                'description': vip.description,
                'pool_id': vip.pool_id,
                'session_persistence': {},
                'connection_limit': vip.connection_limit,
                'admin_state_up': vip.admin_state_up}

        api.lbaas.vip_update(IsA(http.HttpRequest), vip.id, vip=data)\
            .AndReturn(vip)
        self.mox.ReplayAll()

        form_data = data.copy()
        form_data.update({'vip_id': vip.id})

        res = self.client.post(
            reverse(self.UPDATEVIP_PATH, args=(vip.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('vip_get', 'pools_get')})
    def test_update_vip_get(self):
        """GET renders the update-VIP template."""
        vip = self.vips.first()

        api.lbaas.pools_get(IsA(http.HttpRequest)).AndReturn(self.pools.list())
        api.lbaas.vip_get(IsA(http.HttpRequest), vip.id).AndReturn(vip)
        self.mox.ReplayAll()

        res = self.client.get(reverse(self.UPDATEVIP_PATH, args=(vip.id,)))

        self.assertTemplateUsed(res, 'project/loadbalancers/updatevip.html')
    @test.create_stubs({api.lbaas: ('pools_get', 'member_get',
                                    'member_update')})
    def test_update_member_post(self):
        """POSTing the update-member form calls member_update with the form data."""
        member = self.members.first()

        api.lbaas.pools_get(IsA(http.HttpRequest)).AndReturn(self.pools.list())
        api.lbaas.member_get(IsA(http.HttpRequest), member.id)\
            .AndReturn(member)

        data = {'pool_id': member.pool_id,
                'weight': member.weight,
                'admin_state_up': member.admin_state_up}

        api.lbaas.member_update(IsA(http.HttpRequest), member.id, member=data)\
            .AndReturn(member)
        self.mox.ReplayAll()

        form_data = data.copy()
        form_data.update({'member_id': member.id})

        res = self.client.post(
            reverse(self.UPDATEMEMBER_PATH, args=(member.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('member_get', 'pools_get')})
    def test_update_member_get(self):
        """GET renders the update-member template."""
        member = self.members.first()

        api.lbaas.pools_get(IsA(http.HttpRequest)).AndReturn(self.pools.list())
        api.lbaas.member_get(IsA(http.HttpRequest), member.id)\
            .AndReturn(member)
        self.mox.ReplayAll()

        res = self.client.get(
            reverse(self.UPDATEMEMBER_PATH, args=(member.id,)))

        self.assertTemplateUsed(res, 'project/loadbalancers/updatemember.html')
    @test.create_stubs({api.lbaas: ('pool_health_monitor_get',
                                    'pool_health_monitor_update')})
    def test_update_monitor_post(self):
        """POSTing the update-monitor form calls pool_health_monitor_update."""
        monitor = self.monitors.first()

        api.lbaas.pool_health_monitor_get(IsA(http.HttpRequest), monitor.id)\
            .AndReturn(monitor)

        data = {'delay': monitor.delay,
                'timeout': monitor.timeout,
                'max_retries': monitor.max_retries,
                'admin_state_up': monitor.admin_state_up}

        api.lbaas.pool_health_monitor_update(IsA(http.HttpRequest),
                                             monitor.id, health_monitor=data).AndReturn(monitor)
        self.mox.ReplayAll()

        form_data = data.copy()
        form_data.update({'monitor_id': monitor.id})

        res = self.client.post(
            reverse(self.UPDATEMONITOR_PATH, args=(monitor.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_health_monitor_get',)})
    def test_update_monitor_get(self):
        """GET renders the update-monitor template."""
        monitor = self.monitors.first()

        api.lbaas.pool_health_monitor_get(IsA(http.HttpRequest), monitor.id)\
            .AndReturn(monitor)
        self.mox.ReplayAll()

        res = self.client.get(
            reverse(self.UPDATEMONITOR_PATH, args=(monitor.id,)))

        self.assertTemplateUsed(
            res, 'project/loadbalancers/updatemonitor.html')
    @test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitors_get',
                                    'pool_monitor_association_create')})
    def test_add_pool_monitor_association_post(self):
        """POST associates a (not-yet-associated) monitor with the pool."""
        pool = self.pools.first()
        monitors = self.monitors.list()
        # Second monitor: the first is assumed already associated in fixtures
        # — TODO confirm against the test data.
        monitor = self.monitors.list()[1]

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.pool_health_monitors_get(
            IsA(http.HttpRequest)).AndReturn(monitors)
        api.lbaas.pool_monitor_association_create(
            IsA(http.HttpRequest),
            monitor_id=monitor.id,
            pool_id=pool.id,
            pool_monitors=pool.health_monitors,
            pool_name=pool.name).AndReturn(None)
        self.mox.ReplayAll()

        form_data = {'monitor_id': monitor.id,
                     'pool_id': pool.id,
                     'pool_monitors': pool.health_monitors,
                     'pool_name': pool.name}

        res = self.client.post(
            reverse(self.ADDASSOC_PATH, args=(pool.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_get', 'pool_health_monitors_get')})
    def test_add_pool_monitor_association_get(self):
        """GET renders the AddPMAssociation workflow with its single step."""
        pool = self.pools.first()
        monitors = self.monitors.list()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.pool_health_monitors_get(
            IsA(http.HttpRequest)).AndReturn(monitors)
        self.mox.ReplayAll()

        res = self.client.get(reverse(self.ADDASSOC_PATH, args=(pool.id,)))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.AddPMAssociation.name)
        expected_objs = ['<AddPMAssociationStep: addpmassociationaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
    @test.create_stubs({api.lbaas: ('pool_get',
                                    'pool_monitor_association_delete')})
    def test_delete_pool_monitor_association_post(self):
        """POST removes the monitor/pool association."""
        pool = self.pools.first()
        monitor = self.monitors.first()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        api.lbaas.pool_monitor_association_delete(
            IsA(http.HttpRequest),
            monitor_id=monitor.id,
            pool_id=pool.id,
            pool_monitors=pool.health_monitors,
            pool_name=pool.name).AndReturn(None)
        self.mox.ReplayAll()

        form_data = {'monitor_id': monitor.id,
                     'pool_id': pool.id,
                     'pool_monitors': pool.health_monitors,
                     'pool_name': pool.name}

        res = self.client.post(
            reverse(self.DELETEASSOC_PATH, args=(pool.id,)), form_data)

        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, str(self.INDEX_URL))
    @test.create_stubs({api.lbaas: ('pool_get',)})
    def test_delete_pool_monitor_association_get(self):
        """GET renders the DeletePMAssociation workflow with its single step."""
        pool = self.pools.first()

        api.lbaas.pool_get(IsA(http.HttpRequest), pool.id).AndReturn(pool)
        self.mox.ReplayAll()

        res = self.client.get(
            reverse(self.DELETEASSOC_PATH, args=(pool.id,)))

        workflow = res.context['workflow']
        self.assertTemplateUsed(res, views.WorkflowView.template_name)
        self.assertEqual(workflow.name, workflows.DeletePMAssociation.name)
        expected_objs = [
            '<DeletePMAssociationStep: deletepmassociationaction>', ]
        self.assertQuerysetEqual(workflow.steps, expected_objs)
| {
"content_hash": "1d25d76a62546d82eebe0372e46fedcb",
"timestamp": "",
"source": "github",
"line_count": 839,
"max_line_length": 79,
"avg_line_length": 38.80333730631705,
"alnum_prop": 0.5684973583978375,
"repo_name": "Havate/havate-openstack",
"id": "37b456310d27a1a626d2335b65ec5f17733184ed",
"size": "32601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/loadbalancers/tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "407618"
},
{
"name": "HTML",
"bytes": "507406"
},
{
"name": "JavaScript",
"bytes": "25322"
},
{
"name": "Makefile",
"bytes": "6165"
},
{
"name": "Python",
"bytes": "21665856"
},
{
"name": "Shell",
"bytes": "62617"
}
],
"symlink_target": ""
} |
"""Caching of formatted files with feature-based invalidation."""
import os
import pickle
import tempfile
from pathlib import Path
from typing import Dict, Iterable, Set, Tuple
from platformdirs import user_cache_dir
from _black_version import version as __version__
from black.mode import Mode
# types
Timestamp = float  # a file's st_mtime
FileSize = int  # a file's st_size, in bytes
CacheInfo = Tuple[Timestamp, FileSize]  # fingerprint used to detect edits
Cache = Dict[str, CacheInfo]  # resolved source path -> fingerprint
def get_cache_dir() -> Path:
    """Get the cache directory used by black.

    Users can customize this directory on all systems using `BLACK_CACHE_DIR`
    environment variable. By default, the cache directory is the user cache directory
    under the black application.

    This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid
    repeated calls.
    """
    # NOTE: Function mostly exists as a clean way to test getting the cache directory.
    # Only compute the platform default when the user has not overridden the
    # location; this skips an unnecessary platformdirs lookup. An env var set
    # to "" still takes the override path, matching os.environ.get semantics.
    cache_dir = os.environ.get("BLACK_CACHE_DIR")
    if cache_dir is None:
        cache_dir = user_cache_dir("black", version=__version__)
    return Path(cache_dir)
# Resolved once at import time; every cache file lives under this directory.
CACHE_DIR = get_cache_dir()
def read_cache(mode: Mode) -> Cache:
    """Return the on-disk cache for *mode*, or an empty dict.

    A missing or malformed cache file yields ``{}``; a later call to
    write_cache is expected to recreate it.
    """
    path = get_cache_file(mode)
    if not path.exists():
        return {}
    try:
        with path.open("rb") as stream:
            stored: Cache = pickle.load(stream)
    except (pickle.UnpicklingError, ValueError, IndexError):
        return {}
    return stored
def get_cache_file(mode: Mode) -> Path:
    """Return the pickle file holding cached results for *mode*."""
    filename = "cache." + mode.get_cache_key() + ".pickle"
    return CACHE_DIR / filename
def get_cache_info(path: Path) -> CacheInfo:
    """Return the information used to check if a file is already formatted or not."""
    st = path.stat()
    return st.st_mtime, st.st_size
def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]:
    """Split an iterable of paths in `sources` into two sets.

    The first contains paths of files that modified on disk or are not in the
    cache. The other contains paths to non-modified files.
    """
    todo: Set[Path] = set()
    done: Set[Path] = set()
    for source in sources:
        resolved = source.resolve()
        # An up-to-date entry means the stored fingerprint matches the disk.
        if cache.get(str(resolved)) == get_cache_info(resolved):
            done.add(source)
        else:
            todo.add(source)
    return todo, done
def write_cache(cache: Cache, sources: Iterable[Path], mode: Mode) -> None:
    """Update the cache file."""
    cache_file = get_cache_file(mode)
    try:
        CACHE_DIR.mkdir(parents=True, exist_ok=True)
        # Merge the freshly processed sources into the existing entries.
        new_cache = {
            **cache,
            **{str(src.resolve()): get_cache_info(src) for src in sources},
        }
        # Dump to a temp file in the same directory, then atomically move it
        # into place so a concurrent reader never sees a half-written cache.
        with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f:
            pickle.dump(new_cache, f, protocol=4)
        os.replace(f.name, cache_file)
    except OSError:
        # Caching is best-effort: a persistence failure must not break the run.
        pass
| {
"content_hash": "d120d42cff256518a52a870162a1fd14",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 88,
"avg_line_length": 30.371134020618555,
"alnum_prop": 0.6578411405295316,
"repo_name": "psf/black",
"id": "9455ff4477250b78e279040124dcc7215e2e351b",
"size": "2946",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/black/cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "929"
},
{
"name": "Jupyter Notebook",
"bytes": "2848"
},
{
"name": "Python",
"bytes": "4932376"
},
{
"name": "Vim Script",
"bytes": "9445"
}
],
"symlink_target": ""
} |
'''
mali_extract.py -
======================================================
:Author: Andreas Heger
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
.. todo::
describe purpose of the script.
Usage
-----
Example::
python mali_extract.py --help
Type::
python mali_extract.py --help
for command line help.
Command line options
--------------------
'''
import sys
import string
import getopt
import CGAT.Experiment as E
import CGAT.MaliIO as MaliIO
USAGE = """python %s [OPTIONS] < mali > filtered
Extract sequences from a multiple alignment
Version = $Id: mali_extract.py 2782 2009-09-10 11:40:29Z andreas $
Options:
-h, --help print this message.
-v, --verbose= loglevel.
-s, --subset= subset of ids to select
-c, --components filename with components to be analyses separately in the multiple alignment
""" % sys.argv[0]
# getopt option specs; must stay in sync with the dispatch in main().
param_long_options = ["verbose=", "help", "subset=", "components=", "version"]
# NOTE(review): "o:" declares a -o option that main() never handles, and the
# trailing "::" after "c" is unusual for Python's getopt — verify intent.
param_short_options = "v:ho:s:c::"

param_loglevel = 1  # verbosity threshold for status output

param_gap_char = "-"  # character treated as a gap in the alignment
param_mask_char = "x"  # mask character; appears unused in this script
param_subset = None  # comma-separated ids selected via --subset
param_filename_components = None  # id -> component mapping file (--components)
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
try:
optlist, args = getopt.getopt(
sys.argv[1:], param_short_options, param_long_options)
except getopt.error, msg:
print USAGE, msg
sys.exit(2)
for o, a in optlist:
if o in ("-v", "--verbose"):
param_loglevel = int(a)
elif o in ("--version", ):
print "version="
sys.exit(0)
elif o in ("-h", "--help"):
print USAGE
sys.exit(0)
elif o in ("-s", "--subset"):
param_subset = a
elif o == ("-c", "--components"):
param_filename_components = a
if param_loglevel >= 1:
print E.GetHeader()
print E.GetParams()
# 1. read multiple alignment in fasta format
all_mali, all_identifiers = MaliIO.readFasta(sys.stdin)
if len(all_identifiers) == 0:
raise "alignment is empty."
if param_loglevel >= 1:
print "# read mali with %i entries." % len(all_identifiers)
if param_filename_components:
infile = open(param_filename_components, "r")
components = {}
for line in infile:
if line[0] == "#":
continue
if line[0] == ">":
continue
a, b = line[:-1].split("\t")[:2]
if b not in components:
components[b] = []
components[b].append(a)
if param_loglevel >= 1:
print "# read %i components." % len(components)
if param_subset:
components = {'all': string.split(param_subset, ",")}
for key, identifiers in components.items():
# 1. remove gaps in multiple alignment
mali = MaliIO.removeGappedColumns(MaliIO.getSubset(all_mali, identifiers),
param_gap_char)
for i in identifiers:
print ">%s\n%s\n" % (i, mali[i])
if param_loglevel >= 1:
print E.GetFooter()
# Script entry point: the process exit status is whatever main() returns.
if __name__ == "__main__":
    sys.exit(main(sys.argv))
| {
"content_hash": "810633131d183d823af4ca5bd5fb700f",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 108,
"avg_line_length": 23.37323943661972,
"alnum_prop": 0.5438385055739681,
"repo_name": "CGATOxford/Optic",
"id": "e755f0a4ca0eae7876e297797ad5d34c3f668131",
"size": "3319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/mali_extract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "1800"
},
{
"name": "Perl",
"bytes": "18661"
},
{
"name": "Python",
"bytes": "3188910"
},
{
"name": "Shell",
"bytes": "1641"
}
],
"symlink_target": ""
} |
import itertools
import re
from optparse import OptionParser
from bigdl.dataset import news20
from bigdl.nn.layer import *
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.util.common import *
from bigdl.util.common import Sample
def text_to_words(review_text):
    """Lower-case *review_text* and split it into purely alphabetic tokens."""
    alpha_only = re.sub("[^a-zA-Z]", " ", review_text)
    return alpha_only.lower().split()
def analyze_texts(data_rdd):
    """Collect (word, (1-based rank, count)) pairs sorted by descending count.

    *data_rdd* holds (text, label) pairs; labels are ignored here.
    """
    def attach_rank(pair_with_position):
        ((word, count), position) = pair_with_position
        return (word, (position + 1, count))

    words = data_rdd.flatMap(lambda text_label: text_to_words(text_label[0]))
    counts = words.map(lambda word: (word, 1)).reduceByKey(lambda a, b: a + b)
    ranked = counts.sortBy(lambda w_c: - w_c[1]).zipWithIndex()
    return ranked.map(attach_rank).collect()
# pad([1, 2, 3, 4, 5], 0, 6)
def pad(l, fill_value, width):
    """Truncate or right-pad *l* so the result has exactly *width* items.

    When the list is already long enough a new (sliced) list is returned;
    when padding is needed the input list is extended in place and the same
    object is returned.
    """
    missing = width - len(l)
    if missing <= 0:
        return l[:width]
    l += [fill_value] * missing
    return l
def to_vec(token, b_w2v, embedding_dim):
    """Look up *token* in the word-vector dict, else return a zero vector."""
    try:
        return b_w2v[token]
    except KeyError:
        # Out-of-vocabulary tokens map to an all-zero embedding.
        return pad([], 0, embedding_dim)
def to_sample(vectors, label, embedding_dim):
    """Convert a list of word vectors plus a label into a BigDL Sample."""
    # flatten nested list
    flatten_features = list(itertools.chain(*vectors))
    # NOTE(review): relies on the module-level globals ``sequence_len`` and
    # ``model_type`` that are only assigned under ``__main__`` — calling this
    # from another module without setting them raises NameError.
    features = np.array(flatten_features, dtype='float').reshape(
        [sequence_len, embedding_dim])
    if model_type.lower() == "cnn":
        # The CNN path expects (embedding_dim, sequence_len) ordering.
        features = features.transpose(1, 0)
    return Sample.from_ndarray(features, np.array(label))
def build_model(class_num):
    """Build the selected network (cnn | lstm | gru) for *class_num* classes.

    NOTE(review): reads the module-level globals ``model_type``,
    ``embedding_dim``, ``sequence_len`` and ``p`` (dropout), which are only
    assigned under ``__main__``.
    """
    model = Sequential()

    if model_type.lower() == "cnn":
        # Two conv/ReLU/pool stages over the sequence axis, flattened to 128.
        model.add(Reshape([embedding_dim, 1, sequence_len]))
        model.add(SpatialConvolution(embedding_dim, 128, 5, 1))
        model.add(ReLU())
        model.add(SpatialMaxPooling(5, 1, 5, 1))
        model.add(SpatialConvolution(128, 128, 5, 1))
        model.add(ReLU())
        model.add(SpatialMaxPooling(5, 1, 5, 1))
        model.add(Reshape([128]))
    elif model_type.lower() == "lstm":
        model.add(Recurrent()
                  .add(LSTM(embedding_dim, 128, p)))
        # Keep only the output of the last time step.
        model.add(Select(2, -1))
    elif model_type.lower() == "gru":
        model.add(Recurrent()
                  .add(GRU(embedding_dim, 128, p)))
        model.add(Select(2, -1))
    else:
        raise ValueError('model can only be cnn, lstm, or gru')

    # Shared classifier head: 128 -> 100 -> class_num with log-probabilities.
    model.add(Linear(128, 100))
    model.add(Linear(100, class_num))
    model.add(LogSoftMax())
    return model
def train(sc, data_path,
          batch_size,
          sequence_len, max_words, embedding_dim, training_split):
    """Train the text classifier on the News20 corpus.

    Builds a vocabulary from the raw texts, maps each document to a fixed
    length sequence of GloVe vectors, then optimizes the selected model with
    Adagrad, validating with Top-1 accuracy after every epoch.

    NOTE(review): reads the module-level global ``max_epoch`` set under
    ``__main__``.
    """
    print('Processing text dataset')
    texts = news20.get_news20(source_dir=data_path)
    data_rdd = sc.parallelize(texts, 2)

    word_to_ic = analyze_texts(data_rdd)

    # Only take the top wc between [10, sequence_len]
    word_to_ic = dict(word_to_ic[10: max_words])
    bword_to_ic = sc.broadcast(word_to_ic)

    # Restrict the GloVe table to words that survived the vocabulary cut.
    w2v = news20.get_glove_w2v(dim=embedding_dim)
    filtered_w2v = dict((w, v) for w, v in w2v.items() if w in word_to_ic)
    bfiltered_w2v = sc.broadcast(filtered_w2v)

    # Keep only in-vocabulary tokens, then pad/truncate to sequence_len.
    # "##" cannot be produced by text_to_words, so padding tokens fall back
    # to the zero vector inside to_vec.
    tokens_rdd = data_rdd.map(lambda text_label:
                              ([w for w in text_to_words(text_label[0]) if
                                w in bword_to_ic.value], text_label[1]))
    padded_tokens_rdd = tokens_rdd.map(
        lambda tokens_label: (pad(tokens_label[0], "##", sequence_len), tokens_label[1]))
    vector_rdd = padded_tokens_rdd.map(lambda tokens_label:
                                       ([to_vec(w, bfiltered_w2v.value,
                                                embedding_dim) for w in
                                         tokens_label[0]], tokens_label[1]))
    sample_rdd = vector_rdd.map(
        lambda vectors_label: to_sample(vectors_label[0], vectors_label[1], embedding_dim))

    train_rdd, val_rdd = sample_rdd.randomSplit(
        [training_split, 1-training_split])

    optimizer = Optimizer(
        model=build_model(news20.CLASS_NUM),
        training_rdd=train_rdd,
        criterion=ClassNLLCriterion(),
        end_trigger=MaxEpoch(max_epoch),
        batch_size=batch_size,
        optim_method=Adagrad(learningrate=0.01, learningrate_decay=0.0002))

    optimizer.set_validation(
        batch_size=batch_size,
        val_rdd=val_rdd,
        trigger=EveryEpoch(),
        val_method=[Top1Accuracy()]
    )
    train_model = optimizer.optimize()
if __name__ == "__main__":
    # Command-line entry point: parse options, then train (the "test" action
    # is a stub).
    parser = OptionParser()
    parser.add_option("-a", "--action", dest="action", default="train")
    parser.add_option("-b", "--batchSize", dest="batchSize", default="128")
    parser.add_option("-e", "--embedding_dim", dest="embedding_dim", default="50")  # noqa
    parser.add_option("-m", "--max_epoch", dest="max_epoch", default="15")
    parser.add_option("--model", dest="model_type", default="cnn")
    parser.add_option("-p", "--p", dest="p", default="0.0")
    parser.add_option("-d", "--data_path", dest="data_path", default="/tmp/news20/")
    (options, args) = parser.parse_args(sys.argv)

    if options.action == "train":
        # These module-level globals are read by build_model/to_sample/train.
        batch_size = int(options.batchSize)
        embedding_dim = int(options.embedding_dim)
        max_epoch = int(options.max_epoch)
        p = float(options.p)  # dropout probability for the recurrent layers
        model_type = options.model_type
        sequence_len = 50
        max_words = 1000
        training_split = 0.8
        sc = SparkContext(appName="text_classifier",
                          conf=create_spark_conf())
        data_path = options.data_path
        redire_spark_logs()
        show_bigdl_info_logs()
        init_engine()
        train(sc, data_path,
              batch_size,
              sequence_len, max_words, embedding_dim, training_split)
        sc.stop()
    elif options.action == "test":
        # Not implemented.
        pass
| {
"content_hash": "1dde017ef77d000213c5483740e34f72",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 91,
"avg_line_length": 34.96341463414634,
"alnum_prop": 0.591384722706662,
"repo_name": "jenniew/BigDL",
"id": "922ac4a2aefdd390e3f4d948264d6541fbc74529",
"size": "6322",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyspark/bigdl/models/textclassifier/textclassifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "6829"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Python",
"bytes": "654411"
},
{
"name": "RobotFramework",
"bytes": "10720"
},
{
"name": "Scala",
"bytes": "5697448"
},
{
"name": "Shell",
"bytes": "50738"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the boolean ``supports_python3`` column (default False)
        to the ``package_version`` table."""
        # Adding field 'Version.supports_python3'
        db.add_column('package_version', 'supports_python3',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        """Revert: drop the ``supports_python3`` column again."""
        # Deleting field 'Version.supports_python3'
        db.delete_column('package_version', 'supports_python3')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'package.category': {
'Meta': {'ordering': "['title']", 'object_name': 'Category'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'show_pypi': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'50'"}),
'title_plural': ('django.db.models.fields.CharField', [], {'max_length': "'50'", 'blank': 'True'})
},
'package.commit': {
'Meta': {'ordering': "['-commit_date']", 'object_name': 'Commit'},
'commit_date': ('django.db.models.fields.DateTimeField', [], {}),
'commit_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '150', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['package.Package']"})
},
'package.package': {
'Meta': {'ordering': "['title']", 'object_name': 'Package'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['package.Category']"}),
'commit_list': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modifier'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'participants': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'pypi_downloads': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pypi_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'repo_commits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repo_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'repo_forks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'repo_url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'}),
'repo_watchers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'100'"}),
'usage': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'package.packageexample': {
'Meta': {'ordering': "['title']", 'object_name': 'PackageExample'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['package.Package']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'100'"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'package.version': {
'Meta': {'ordering': "['-upload_time']", 'object_name': 'Version'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'development_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'downloads': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': "'100'"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': "'100'", 'blank': "''"}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['package.Package']", 'null': 'True', 'blank': 'True'}),
'supports_python3': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'upload_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['package'] | {
"content_hash": "b1951f73024725ed85a6bc2a0b398584",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 206,
"avg_line_length": 77.712,
"alnum_prop": 0.5541486514309244,
"repo_name": "miketheman/opencomparison",
"id": "373d6c5f7f42fcb5a4fa909cbb8984c9d6cd2519",
"size": "9738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "package/migrations/0021_auto__add_field_version_supports_python3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "30750"
},
{
"name": "JavaScript",
"bytes": "291393"
},
{
"name": "Python",
"bytes": "611089"
}
],
"symlink_target": ""
} |
"""
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
Explanation video: http://youtu.be/4YqIKncMJNs
Explanation video: http://youtu.be/ONAK8VZIcI4
Explanation video: http://youtu.be/_6c4o41BIms
"""
import pygame
# Define some colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)

# Call this function so the Pygame library can initialize itself
pygame.init()

# Create an 800x600 sized screen
screen = pygame.display.set_mode([800, 600])

clock = pygame.time.Clock()

# Set positions of graphics
# NOTE(review): background_position is never used — no background is blitted.
background_position = [0, 0]

# Load the sprite; white pixels in the image are made transparent.
player_image = pygame.image.load("hornet.jpg").convert()
player_image.set_colorkey(WHITE)

done = False

# Main loop: quit on window close, otherwise draw the sprite at the mouse.
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True

    # Get the current mouse position. This returns the position
    # as a list of two numbers.
    player_position = pygame.mouse.get_pos()
    x = player_position[0]
    y = player_position[1]

    # Draw the sprite with its top-left corner at the mouse position.
    # NOTE(review): the screen is never cleared between frames, so previous
    # draws remain visible and the image smears — presumably a fill() is
    # intended here; confirm against the tutorial this sample follows.
    screen.blit(player_image, [x, y])

    pygame.display.flip()

    clock.tick(60)
pygame.quit() | {
"content_hash": "e7d2e96c93c99abc8cf5102c4887beb0",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 64,
"avg_line_length": 21.295081967213115,
"alnum_prop": 0.648960739030023,
"repo_name": "treefroog/SQ42-Clone",
"id": "06f77f7514849ecca5743edbc2579ddb4e94a679",
"size": "1299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "image_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13289"
}
],
"symlink_target": ""
} |
import logging
import logging.config
import os.path
from configparser import ConfigParser
import os
def load_config() -> ConfigParser:
    """Assemble the application configuration from environment variables.

    Sections: ``bot.token`` <- CF_BOT_TOKEN, ``db.url`` <- CF_DB_URL, plus a
    static ``updates.mode`` of ``polling``.
    """
    parser = ConfigParser()
    defaults = {
        'bot': {'token': os.getenv('CF_BOT_TOKEN')},
        'db': {'url': os.getenv('CF_DB_URL')},
        'updates': {'mode': 'polling'},
    }
    parser.read_dict(defaults)
    return parser
def setup_logging():
    """Configure logging from the bundled config, overlaid by a user config.

    Reads resources/cfg/logging.conf first; if LOGGING_CONFIG_PATH (default
    ./logging.conf) names an existing file, its settings override the bundled
    defaults. Uses the module-level ``encoding`` for both reads.
    """
    merged = ConfigParser()
    merged.read(os.path.join('resources', 'cfg', 'logging.conf'), encoding=encoding)
    override = os.getenv('LOGGING_CONFIG_PATH', './logging.conf')
    # isfile() already implies existence, so a single check suffices.
    if os.path.isfile(override):
        merged.read(override, encoding=encoding)
    logging.config.fileConfig(merged)
# Encoding for all config reads; must be assigned before setup_logging() runs.
encoding = 'utf-8'

setup_logging()
config = load_config()

# IOC
# Importing this module wires up the application's shared singletons below.
from .db import DB
db = DB(config['db'])

from src.repository import *
# Shared repository instances.
user_repository = UserRepository()
channel_repository = ChannelRepository()
subscription_repository = SubscriptionRepository()

from src.service import *
# Shared service instances.
subscriptions = Subscriptions()
settings = Settings()
| {
"content_hash": "9f68b2aec08ce8b9a7182fdd944d2f68",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 88,
"avg_line_length": 24.108695652173914,
"alnum_prop": 0.6889089269612263,
"repo_name": "telegram-bots/telegram-channels-feed",
"id": "f5ed41a5e0a9029cc2cd363a5ff2fac45b5e18c2",
"size": "1118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot/src/component/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1179"
},
{
"name": "Kotlin",
"bytes": "52732"
},
{
"name": "PLpgSQL",
"bytes": "2021"
},
{
"name": "Python",
"bytes": "28891"
},
{
"name": "Shell",
"bytes": "184"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable ``play_count`` integer (default 0) to ``Song``."""

    dependencies = [
        ('web', '0002_auto_20160818_0509'),
    ]

    operations = [
        migrations.AddField(
            model_name='song',
            name='play_count',
            # Nullable with a default so existing rows need no backfill.
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
    ]
| {
"content_hash": "cc072e073d46603b04c884e91caad938",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 72,
"avg_line_length": 22,
"alnum_prop": 0.5934343434343434,
"repo_name": "appendjeff/pianobar-client",
"id": "416f8463dab8b6531496d293d42abd5b2bcc8e6e",
"size": "467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/migrations/0003_song_play_count.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3861"
},
{
"name": "HTML",
"bytes": "9094"
},
{
"name": "JavaScript",
"bytes": "25231"
},
{
"name": "Python",
"bytes": "20183"
},
{
"name": "Shell",
"bytes": "654"
}
],
"symlink_target": ""
} |
from django import template
from ..views import BrokenException
register = template.Library()
@register.simple_tag
def go_boom(arg):
    """Simple tag that unconditionally raises ``BrokenException(arg)``."""
    raise BrokenException(arg)
| {
"content_hash": "95a203a0d99ad192c9818a12bf9f473b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 35,
"avg_line_length": 17.8,
"alnum_prop": 0.7247191011235955,
"repo_name": "yephper/django",
"id": "407e74c5c2675d92a206e5ff9e55f04bf0a98d44",
"size": "178",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/view_tests/templatetags/debugtags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions enabled for this project's documentation build.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "lumixmaptool"
copyright = "2015, Martin Thoma"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): version and release are duplicated literals and must be bumped
# together on every release — consider deriving one from the other.
# The short X.Y version.
version = "1.0.13"
# The full version, including alpha/beta/rc tags.
release = "1.0.13"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "lumixmaptooldoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
"lumixmaptool.tex",
"lumixmaptool Documentation",
"Martin Thoma",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "lumixmaptool", "lumixmaptool Documentation", ["Martin Thoma"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"lumixmaptool",
"lumixmaptool Documentation",
"Martin Thoma",
"lumixmaptool",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"http://docs.python.org/": None}
| {
"content_hash": "8b22e5f7f8c90c02f87e9f45ba30cb74",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 80,
"avg_line_length": 31.263358778625953,
"alnum_prop": 0.6918569161274569,
"repo_name": "MartinThoma/lumixmaptool",
"id": "e0a61e40cfc2c46ad08e049d657c96fc0eb87793",
"size": "8615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "652"
},
{
"name": "Python",
"bytes": "20246"
}
],
"symlink_target": ""
} |
from utils.base_estimator import BaseEstimator
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
np.random.seed(2046)
def euclidean_distance(x, y):
    """Return the Euclidean (L2) distance between vectors *x* and *y*."""
    diff = x - y
    return np.sqrt(np.sum(diff ** 2))
class KMeans(BaseEstimator):
    """Partition a dataset into K clusters.

    Finds clusters by repeatedly assigning each data point to the cluster with
    the nearest centroid and iterating until the assignments converge (meaning
    they don't change during an iteration) or the maximum number of iterations
    is reached.

    Centroids are initialized by randomly selecting K distinct samples from
    the dataset. For a better method that improves convergence rates and
    avoids degenerate cases, see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007

    Parameters
    ----------
    K : int, default 8
        The number of clusters into which the dataset is partitioned.
    max_iters : int, default 300
        The maximum iterations of assigning points to the nearest cluster.
        Short-circuited by the assignments converging on their own.
    """

    def __init__(self, K=8, max_iters=300):
        self.K = K
        self.max_iters = max_iters
        # Cluster index assigned to each sample; populated by fit().
        self.labels = []
        # Current centroid vector for each of the K clusters.
        self.centroids = []

    def _init_centroids(self):
        """Pick K distinct random samples of X as the initial centroids."""
        indices = np.random.choice(self.n_samples, self.K, replace=False)
        self.centroids = self.X[indices]

    # Backwards-compatible alias for the previously misspelled internal name.
    _init_cetroids = _init_centroids

    def _dist_from_centers(self):
        """Distance from each sample to its nearest current centroid.

        Currently unused; presumably intended for k-means++ style seeding
        (see the class docstring reference).
        """
        return np.array([min([euclidean_distance(x, c) for c in self.centroids]) for x in self.X])

    def fit(self, X=None):
        """Perform the clustering on the given dataset."""
        self._setup_input(X, y_required=False)
        self._init_centroids()
        for i in range(self.max_iters):
            # Assign every sample to its closest current centroid. Storing
            # the labels *before* the convergence check fixes a bug where
            # self.labels stayed empty if convergence hit on iteration 1,
            # and was otherwise one iteration stale on exit.
            self.labels = np.apply_along_axis(self._closest_cluster, 1, self.X)

            # Recompute each centroid as the mean of its assigned samples.
            # If a cluster lost all its points, keep its previous centroid
            # instead of producing a NaN from np.mean of an empty slice.
            new_centroids = []
            for k in range(self.K):
                members = self.X[self.labels == k]
                if len(members):
                    new_centroids.append(np.mean(members, axis=0))
                else:
                    new_centroids.append(self.centroids[k])

            if self._is_converged(self.centroids, new_centroids):
                print('Converged on iteration %s' % (i + 1))
                break

            self.centroids = new_centroids

    def _predict(self, X=None):
        """Return the closest-cluster index for each row of X."""
        return np.apply_along_axis(self._closest_cluster, 1, X)

    def _closest_cluster(self, data_point):
        """Return the index of the centroid closest to data_point."""
        closest_index = 0
        closest_distance = float("inf")
        for cluster_i, centroid in enumerate(self.centroids):
            distance = euclidean_distance(data_point, centroid)
            if distance < closest_distance:
                closest_distance = distance
                closest_index = cluster_i
        return closest_index

    def _is_converged(self, centroids, new_centroids):
        """True when no centroid moved at all between two iterations."""
        return sum(euclidean_distance(centroids[i], new_centroids[i])
                   for i in range(self.K)) == 0

    def plot(self, data=None):
        """Scatter-plot the clustered data; centroids are marked with an 'x'."""
        if data is None:
            data = self.X
        for k in range(self.K):
            points = data[self.labels == k].T
            plt.scatter(*points, c=sns.color_palette("hls", self.K + 1)[k])
        for point in self.centroids:
            plt.scatter(*point, marker='x', linewidths=10)
| {
"content_hash": "93f7ea1860d064f9f93f193c8f25577c",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 119,
"avg_line_length": 37,
"alnum_prop": 0.625945945945946,
"repo_name": "transedward/ml-playground",
"id": "95e45df8b6fc11a0ec82a6e5dde3e2e6055201bb",
"size": "3700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unsupervised/kmeans.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "3167015"
},
{
"name": "Python",
"bytes": "76537"
}
],
"symlink_target": ""
} |
"""Sensor from an SQL Query."""
import datetime
import decimal
import logging
import sqlalchemy
from sqlalchemy.orm import scoped_session, sessionmaker
import voluptuous as vol
from homeassistant.components.recorder import CONF_DB_URL, DEFAULT_DB_FILE, DEFAULT_URL
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Platform-specific configuration keys (validated by _QUERY_SCHEME below).
CONF_COLUMN_NAME = "column"
CONF_QUERIES = "queries"
CONF_QUERY = "query"
def validate_sql_select(value):
    """Validate that *value* is a SQL SELECT query and return it unchanged.

    Raises vol.Invalid for any statement that does not start with SELECT
    (case-insensitive, leading whitespace ignored).
    """
    normalized = value.lstrip().lower()
    if normalized.startswith("select"):
        return value
    raise vol.Invalid("Only SELECT queries allowed")
# Schema for one entry under the "queries" configuration key.
_QUERY_SCHEME = vol.Schema(
    {
        vol.Required(CONF_COLUMN_NAME): cv.string,
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_QUERY): vol.All(cv.string, validate_sql_select),
        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
    }
)

# Platform schema: a required list of query entries plus an optional DB URL.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_QUERIES): [_QUERY_SCHEME], vol.Optional(CONF_DB_URL): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the SQL sensor platform.

    Validates the database connection once with a dummy query, then creates
    one SQLSensor entity per configured query.
    """
    db_url = config.get(CONF_DB_URL)
    if not db_url:
        db_url = DEFAULT_URL.format(hass_config_path=hass.config.path(DEFAULT_DB_FILE))

    sess = None
    try:
        engine = sqlalchemy.create_engine(db_url)
        sessmaker = scoped_session(sessionmaker(bind=engine))

        # Run a dummy query just to test the db_url
        sess = sessmaker()
        sess.execute("SELECT 1;")
    except sqlalchemy.exc.SQLAlchemyError as err:
        _LOGGER.error("Couldn't connect using %s DB_URL: %s", db_url, err)
        return
    finally:
        # Bug fix: only close the session if it was actually created.
        # Previously a failure inside create_engine()/sessionmaker() made
        # this clause raise NameError because `sess` was never bound.
        if sess is not None:
            sess.close()

    queries = []
    for query in config.get(CONF_QUERIES):
        name = query.get(CONF_NAME)
        query_str = query.get(CONF_QUERY)
        unit = query.get(CONF_UNIT_OF_MEASUREMENT)
        value_template = query.get(CONF_VALUE_TEMPLATE)
        column_name = query.get(CONF_COLUMN_NAME)

        if value_template is not None:
            value_template.hass = hass

        # Limit the query to a single row if the user did not already.
        # MSSQL uses TOP and not LIMIT
        if not ("LIMIT" in query_str or "SELECT TOP" in query_str):
            query_str = (
                query_str.replace("SELECT", "SELECT TOP 1")
                if "mssql" in db_url
                else query_str.replace(";", " LIMIT 1;")
            )

        sensor = SQLSensor(
            name, sessmaker, query_str, column_name, unit, value_template
        )
        queries.append(sensor)

    add_entities(queries, True)
class SQLSensor(SensorEntity):
    """Representation of an SQL sensor."""

    def __init__(self, name, sessmaker, query, column, unit, value_template):
        """Initialize the SQL sensor.

        name: friendly entity name.
        sessmaker: scoped SQLAlchemy session factory shared by the platform.
        query: SQL SELECT statement (setup_platform limits it to one row).
        column: result column whose value becomes the sensor state.
        unit: unit of measurement, or None.
        value_template: optional template applied to the raw column value.
        """
        self._name = name
        self._query = query
        self._unit_of_measurement = unit
        self._template = value_template
        self._column_name = column
        self.sessionmaker = sessmaker
        self._state = None
        self._attributes = None

    @property
    def name(self):
        """Return the name of the query."""
        return self._name

    @property
    def state(self):
        """Return the query's current state."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return self._unit_of_measurement

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        return self._attributes

    def update(self):
        """Retrieve sensor data from the query."""
        data = None
        sess = None
        try:
            sess = self.sessionmaker()
            result = sess.execute(self._query)
            self._attributes = {}
            if not result.returns_rows or result.rowcount == 0:
                _LOGGER.warning("%s returned no results", self._query)
                self._state = None
                return

            # Single row expected; the state comes from the configured
            # column, every column is exposed as a state attribute.
            for res in result:
                _LOGGER.debug("result = %s", res.items())
                data = res[self._column_name]
                for key, value in res.items():
                    # Convert DB-specific types into plain Python values.
                    if isinstance(value, decimal.Decimal):
                        value = float(value)
                    if isinstance(value, datetime.date):
                        value = str(value)
                    self._attributes[key] = value
        except sqlalchemy.exc.SQLAlchemyError as err:
            _LOGGER.error("Error executing query %s: %s", self._query, err)
            return
        finally:
            # Bug fix: guard against `sess` being unbound if sessionmaker()
            # itself raised; closing unconditionally caused a NameError.
            if sess is not None:
                sess.close()

        if data is not None and self._template is not None:
            self._state = self._template.async_render_with_possible_json_value(
                data, None
            )
        else:
            self._state = data
| {
"content_hash": "7342816d40d39b22d9ec087b7cfada31",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 88,
"avg_line_length": 31.375,
"alnum_prop": 0.600996015936255,
"repo_name": "adrienbrault/home-assistant",
"id": "a537b160d0b367e7ed5216f7a9894ec20df90468",
"size": "5020",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sql/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
import codecs, json, networkx as nx, operator, os, unittest
import databasic.logic.connectthedots as ctd
import databasic.logic.filehandler as filehandler
from csvkit import table
from functools import reduce
class ConnectTheDotsTest(unittest.TestCase):
    """
    Unit testing suite for ConnectTheDots

    Expected values are verified either by manual counting or by
    recomputing the metric with an independent (generalized) formula.
    All inputs come from CSV/XLSX files in the fixtures directory.
    """

    def setUp(self):
        # Fixture files live next to this test module.
        self._fixtures_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'fixtures')

    def test_count_nodes(self):
        test_data_path = os.path.join(self._fixtures_dir, 'les-miserables.csv')
        results = ctd.get_summary(test_data_path)
        self.assertEqual(results['nodes'], 77)  # len(set(self.table[0] + self.table[1]))

    def test_count_edges(self):
        test_data_path = os.path.join(self._fixtures_dir, 'les-miserables.csv')
        results = ctd.get_summary(test_data_path)
        self.assertEqual(results['edges'], 254)  # self.table.count_rows()

    def test_import_xls(self):
        # XLSX input goes through filehandler's CSV conversion first.
        test_data_path = os.path.join(self._fixtures_dir, 'zachary-karate-club.xlsx')
        csv_file = filehandler.convert_to_csv(test_data_path)[0]
        results = ctd.get_summary(csv_file)
        self.assertEqual(results['nodes'], 34)
        self.assertEqual(results['edges'], 78)

    def test_import_no_header(self):
        # Second argument False tells get_summary the CSV has no header row.
        test_data_path = os.path.join(self._fixtures_dir, 'handshake-problem.csv')
        results = ctd.get_summary(test_data_path, False)
        self.assertEqual(results['nodes'], 5)
        self.assertEqual(results['edges'], 10)

    def test_invalid_import(self):
        # Invalid input yields an empty summary rather than raising.
        test_data_path = os.path.join(self._fixtures_dir, 'invalid-graph.csv')
        results = ctd.get_summary(test_data_path)
        self.assertEqual(results, {})

    def test_clustering_score(self):
        """
        Test global clustering score with generalized formula

        This is the average of the local clustering scores for each node v:

                      2 Nv           where Kv = degree
            C(v) = -----------             Nv = number of edges between
                   Kv (Kv - 1)                  the neighbors of v
        """
        test_data_path = os.path.join(self._fixtures_dir, 'les-miserables.csv')
        results = ctd.get_summary(test_data_path)
        graph = ctd.get_graph(test_data_path)
        local_scores = []
        for v in graph.nodes():
            k = graph.degree(v)
            # Collect each edge among v's neighbors once (sorted tuple).
            neighbor_links = []
            for u in nx.all_neighbors(graph, v):
                neighbor_links += [tuple(sorted((u, w))) for w in nx.common_neighbors(graph, u, v)]
            n = len(list(set(neighbor_links)))
            local_scores.append(2 * n / float(k * (k - 1))) if k > 1 else local_scores.append(0)
        self.assertAlmostEqual(results['clustering'], sum(local_scores) / float(len(local_scores)))

    def test_clustering_score_star(self):
        test_data_path = os.path.join(self._fixtures_dir, 'simple-network.csv')
        results = ctd.get_summary(test_data_path)
        self.assertEqual(results['clustering'], 0)  # no clusters, neighbors are never connected

    def test_clustering_score_clique(self):
        test_data_path = os.path.join(self._fixtures_dir, 'handshake-problem.csv')
        results = ctd.get_summary(test_data_path, False)
        self.assertEqual(results['clustering'], 1)  # complete graph, all nodes connected

    def test_density_score(self):
        test_data_path = os.path.join(self._fixtures_dir, 'les-miserables.csv')
        results = ctd.get_summary(test_data_path)
        self.assertEqual(results['density'], 0.08680792891319207)  # float(2 * self.count_edges()) /
                                                                   # (count_nodes() * (self.count_nodes() - 1))

    def test_centrality_scores(self):
        """
        Test betweenness centrality with generalized formula

        For a node v and every other node pair (s, t), we take the proportion of shortest paths s => t that include
        v and then normalize the sum of all the proportions by dividing (N - 1)(N - 2) / 2, the number of node pairs
        """
        test_data_path = os.path.join(self._fixtures_dir, 'les-miserables.csv')
        results = ctd.get_summary(test_data_path)
        graph = ctd.get_graph(test_data_path)
        table = results['table']
        self.assertEqual(table[0]['id'], 'Valjean')
        nodes = graph.nodes()
        nodes.remove('Valjean')
        betweenness_centrality = 0
        visited_paths = []
        for u in nodes:
            for v in nodes:
                current_path = tuple(sorted((u, v)))
                # Each unordered pair is evaluated only once.
                if u == v or current_path in visited_paths:
                    continue
                else:
                    visited_paths.append(current_path)
                paths = list(nx.all_shortest_paths(graph, u, v))
                total_paths = len(paths)
                paths_with_valjean = reduce(lambda n, path: n + 1 if 'Valjean' in path else n, paths, 0)
                betweenness_centrality += paths_with_valjean / float(total_paths)
        node_pairs = len(nodes) * (len(nodes) - 1) / float(2)
        normalized_score = betweenness_centrality / node_pairs
        self.assertAlmostEqual(table[0]['centrality'], normalized_score)

    def test_centrality_scores_simple(self):
        """
        Test betweenness centrality for simple (independently verifiable) case

        A   D
         > C <    All shortest paths go through C, connector score = 1
        B   E
        """
        test_data_path = os.path.join(self._fixtures_dir, 'simple-network.csv')
        results = ctd.get_summary(test_data_path)
        table = results['table']
        self.assertEqual(table[0]['id'], 'C')
        self.assertEqual(table[0]['centrality'], 1)
        for i in range(1, 5):
            self.assertEqual(table[i]['centrality'], 0)

    def test_degree_scores(self):
        test_data_path = os.path.join(self._fixtures_dir, 'les-miserables.csv')
        results = ctd.get_summary(test_data_path)
        table = sorted(results['table'], key=operator.itemgetter('degree'), reverse=True)
        self.assertEqual(table[0]['id'], 'Valjean')
        self.assertEqual(table[0]['degree'], 36)  # counted manually

    def test_degree_scores_simple(self):
        """
        Test degree scores for simple (independently verifiable) case

        A   D
         > C <    All nodes have degree 1 except for C, which has degree 4
        B   E
        """
        test_data_path = os.path.join(self._fixtures_dir, 'simple-network.csv')
        results = ctd.get_summary(test_data_path)
        table = sorted(results['table'], key=operator.itemgetter('degree'), reverse=True)
        self.assertEqual(table[0]['id'], 'C')
        self.assertEqual(table[0]['degree'], 4)
        for i in range(1, 5):
            self.assertEqual(table[i]['degree'], 1)

    def test_as_json_nodes(self):
        test_data_path = os.path.join(self._fixtures_dir, 'simple-network.csv')
        results = ctd.get_summary(test_data_path)
        data = json.loads(results['json'])
        nodes = sorted(data['nodes'], key=operator.itemgetter('id'))  # [A, B, C, D, E]
        self.assertEqual(len(nodes), 5)
        for n in [0, 1, 3, 4]:
            self.assertEqual(nodes[n]['degree'], 1)
            self.assertEqual(nodes[n]['centrality'], 0)
        self.assertEqual(nodes[2]['degree'], 4)
        self.assertEqual(nodes[2]['centrality'], 1)

    def test_as_json_edges(self):
        test_data_path = os.path.join(self._fixtures_dir, 'simple-network.csv')
        results = ctd.get_summary(test_data_path)
        data = json.loads(results['json'])
        nodes = data['nodes']
        # Edges reference nodes by index; sort by (source id, target id).
        edges = sorted(data['links'], key=lambda e: (nodes[e['source']]['id'], nodes[e['target']]['id']))
        self.assertEqual(len(edges), 4)
        self.assertEqual(nodes[edges[0]['source']]['id'], 'A')
        self.assertEqual(nodes[edges[0]['target']]['id'], 'C')
        targets = ['B', 'D', 'E']
        for n in range(1, 4):
            self.assertEqual(nodes[edges[n]['source']]['id'], 'C')
            self.assertEqual(nodes[edges[n]['target']]['id'], targets[n - 1])

    def test_as_gexf(self):
        # GEXF export is compared against a golden fixture file.
        test_data_path = os.path.join(self._fixtures_dir, 'les-miserables.csv')
        results = ctd.get_summary(test_data_path)
        test_gexf_path = os.path.join(self._fixtures_dir, 'graph.gexf')
        with open(test_gexf_path, 'r') as gexf:
            contents = gexf.read()
        self.assertEqual(contents, results['gexf'])

    def test_is_bipartite_candidate(self):
        test_data_path = os.path.join(self._fixtures_dir, 'southern-women.csv')
        results = ctd.get_summary(test_data_path)
        data = json.loads(results['json'])
        nodes = data['nodes']
        # Expected column (partition) for every node in the bipartite graph.
        cols = {'BRENDA': 0, 'CHARLOTTE': 0, 'DOROTHY': 0, 'ELEANOR': 0, 'EVELYN': 0, 'FLORA': 0,
                'FRANCES': 0, 'HELEN': 0, 'KATHERINE': 0, 'LAURA': 0, 'MYRNA': 0, 'NORA': 0,
                'OLIVIA': 0, 'PEARL': 0, 'RUTH': 0, 'SYLVIA': 0, 'THERESA': 0, 'VERNE': 0,
                'E1': 1, 'E10': 1, 'E11': 1, 'E12': 1, 'E13': 1, 'E14': 1, 'E2': 1, 'E3': 1,
                'E4': 1, 'E5': 1, 'E6': 1, 'E7': 1, 'E8': 1, 'E9': 1}
        self.assertTrue(results['bipartite'])
        for n in nodes:
            self.assertEqual(n['column'], cols[n['id']])

    def test_is_not_bipartite_candidate(self):
        test_data_path = os.path.join(self._fixtures_dir, 'simple-network.csv')
        results = ctd.get_summary(test_data_path)
        data = json.loads(results['json'])
        nodes = data['nodes']
        self.assertFalse(results['bipartite'])
        for n in nodes:
            self.assertNotIn('column', n)

    def test_large_file(self):
        test_data_path = os.path.join(self._fixtures_dir, 'airline-routes.csv')
        results = ctd.get_summary(test_data_path)
        self.assertEqual(results['nodes'], 3425)
        self.assertEqual(results['edges'], 19257)
        self.assertTrue(results['large_dataset'])
        # Compare estimated centralities against precomputed reference values.
        table_path = os.path.join(self._fixtures_dir, 'airline-routes-centralities.csv')
        table_file = codecs.open(table_path, 'r')
        bc_table = table.Table.from_csv(table_file, no_header_row=False, snifflimit=0)
        bc_rows = bc_table.to_rows()
        bc_estimates = {}
        for row in results['table'][:40]:
            bc_estimates[row['id']] = row['centrality']
        for row in bc_rows:
            if row[0] in bc_estimates:
                self.assertAlmostEqual(bc_estimates[row[0]], row[1], places=2)  # accurate to two decimal places
"content_hash": "d2a35774f1ed6e9de2bbf9311826d335",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 116,
"avg_line_length": 43.142276422764226,
"alnum_prop": 0.588429284839348,
"repo_name": "c4fcm/DataBasic",
"id": "b2c15a0a5618283f39e414b9b7d7e6af6a275363",
"size": "10613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "databasic/logic/test/connectthedotstest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27938"
},
{
"name": "HTML",
"bytes": "87657"
},
{
"name": "JavaScript",
"bytes": "821416"
},
{
"name": "Python",
"bytes": "126684"
},
{
"name": "Shell",
"bytes": "3013"
}
],
"symlink_target": ""
} |
"""
sir.py:
Example of a Susceptible-Infectious-Recovered epidemiological
cellular automaton model implemented on a hexagonal grid using stochastic
pair-transition rules.
GT Sep 2014
"""
from __future__ import print_function
_DEBUG = False  # set True to print the assembled transition list in setup_transition_list()
import time
from landlab import HexModelGrid
from numpy import where, logical_and
from landlab.components.cellular_automata.celllab_cts import Transition, CAPlotter
from landlab.components.cellular_automata.hex_cts import HexCTS
import pylab
def setup_transition_list(infection_rate):
    """
    Creates and returns a list of Transition() objects to represent state
    transitions for the SIR model.

    Parameters
    ----------
    infection_rate : float
        Rate at which an infectious node infects a susceptible neighbor
        (recovery transitions are hard-wired at rate 1.0 below).

    Returns
    -------
    xn_list : list of Transition objects
        List of objects that encode information about the link-state transitions.

    Notes
    -----
    Node states are 0 = susceptible, 1 = infectious, 2 = recovered.
    The pair states and transitions are as follows:

    Pair state      Transition to       Process
    ==========      =============       =======
    0 (0-0)         (none)              -
    1 (0-1)         4 (1-1)             infection
                    2 (0-2)             recovery
    2 (0-2)         (none)              -
    3 (1-0)         4 (1-1)             infection
                    6 (2-0)             recovery
    4 (1-1)         5 (1-2)             recovery
                    7 (2-1)             recovery
    5 (1-2)         8 (2-2)             recovery
    6 (2-0)         (none)              -
    7 (2-1)         8 (2-2)             recovery
    8 (2-2)         (none)              -
    """
    xn_list = []

    xn_list.append( Transition((0,1,0), (1,1,0), infection_rate, 'infection') )
    xn_list.append( Transition((0,1,0), (0,2,0), 1., 'recovery') )
    xn_list.append( Transition((1,0,0), (1,1,0), infection_rate, 'infection') )
    xn_list.append( Transition((1,0,0), (2,0,0), 1., 'recovery') )
    xn_list.append( Transition((1,1,0), (1,2,0), 1., 'recovery') )
    xn_list.append( Transition((1,1,0), (2,1,0), 1., 'recovery') )
    xn_list.append( Transition((1,2,0), (2,2,0), 1., 'recovery') )
    xn_list.append( Transition((2,1,0), (2,2,0), 1., 'recovery') )

    if _DEBUG:
        print()
        print('setup_transition_list(): list has',len(xn_list),'transitions:')
        for t in xn_list:
            print('  From state',t.from_state,'to state',t.to_state,'at rate',t.rate,'called',t.name)

    return xn_list
def main():
    """Run the SIR cellular-automaton model and save one PDF snapshot per plot interval."""
    # INITIALIZE

    # User-defined parameters
    nr = 80
    nc = 41
    plot_interval = 0.25
    run_duration = 5.0
    report_interval = 5.0  # report interval, in real-time seconds
    infection_rate = 3.0
    outfilename = 'sirmodel'+str(int(infection_rate))+'ir'

    # Remember the clock time, and calculate when we next want to report
    # progress.
    current_real_time = time.time()
    next_report = current_real_time + report_interval
    time_slice = 0

    # Create a grid
    hmg = HexModelGrid(nr, nc, 1.0)

    # Set up the states and pair transitions.
    # Transition data here represent the disease status of a population.
    ns_dict = { 0 : 'susceptible', 1 : 'infectious', 2: 'recovered' }
    xn_list = setup_transition_list(infection_rate)

    # Create data and initialize values.
    # The infection is seeded in a rectangular patch near the grid center.
    node_state_grid = hmg.add_zeros('node', 'node_state_grid')
    wid = nc-1.0
    ht = (nr-1.0)*0.866
    is_middle_rows = logical_and(hmg.node_y>=0.4*ht, hmg.node_y<=0.5*ht)
    is_middle_cols = logical_and(hmg.node_x>=0.4*wid, hmg.node_x<=0.6*wid)
    middle_area = where(logical_and(is_middle_rows, is_middle_cols))[0]
    node_state_grid[middle_area] = 1
    node_state_grid[0] = 2  # to force full color range, set lower left to 'recovered'

    # Create the CA model
    ca = HexCTS(hmg, ns_dict, xn_list, node_state_grid)

    # Set up the color map
    import matplotlib
    susceptible_color = (0.5, 0.5, 0.5)  # gray
    infectious_color = (0.05, 0.0, 0.0)  # dark red
    recovered_color = (0.95, 0.95, 1.0)  # white w/ faint blue
    clist = [susceptible_color, infectious_color, recovered_color]
    my_cmap = matplotlib.colors.ListedColormap(clist)

    # Create a CAPlotter object for handling screen display
    ca_plotter = CAPlotter(ca, cmap=my_cmap)

    # Plot the initial grid
    ca_plotter.update_plot()
    pylab.axis('off')
    savename = outfilename+'0'
    pylab.savefig(savename+'.pdf', format='pdf')

    # RUN
    current_time = 0.0
    while current_time < run_duration:

        # Once in a while, print out simulation and real time to let the user
        # know that the sim is running ok
        current_real_time = time.time()
        if current_real_time >= next_report:
            print('Current sim time',current_time,'(',100*current_time/run_duration,'%)')
            next_report = current_real_time + report_interval

        # Run the model forward in time until the next output step
        ca.run(current_time+plot_interval, ca.node_state,
               plot_each_transition=False)  #True, plotter=ca_plotter)
        current_time += plot_interval

        # Plot the current grid and save it as a numbered PDF snapshot
        ca_plotter.update_plot()
        pylab.axis('off')
        time_slice += 1
        savename = outfilename+str(time_slice)
        pylab.savefig(savename+'.pdf', format='pdf')

    # FINALIZE

    # Plot
    ca_plotter.finalize()
# Standard script entry point.
if __name__=='__main__':
    main()
| {
"content_hash": "f34165993e6535136c329b79335496ee",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 101,
"avg_line_length": 33.27439024390244,
"alnum_prop": 0.577240241891149,
"repo_name": "landlab/pub_tucker_etal_gmd",
"id": "2dc316e906b220f806117f1c8f3aa61155ce7ef1",
"size": "5475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codes/sir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "96641"
}
],
"symlink_target": ""
} |
: from .template import page
: from . import table_class, table_style, caption_args
: from .helpers.helpers import aa_link
: import datetime
: # Render the "what's new" page: a sortable table with one row per new track
: # (artist link, count, size, total play time, and when it was added).
: def whatsnewtemplate title, ctx, days, newlist
: using page title, ctx, lang="en"
<div class='container'>
<div class='row table-responsive'>
<div class="col-md-12">
: table_class.append('sortable')
<table class="#{' '.join(table_class)}" style="#{' '.join(table_style)}" id='whatsnew'>
<caption #{caption_args}>${ctx.format_decimal(ctx.new_counts.new_count)} tracks added in the last ${ctx.time_ago(days, add_direction=False)}</caption>
<thead>
<tr><th>Artist</th><th>New Tracks</th><th>Size</th><th>Total Play Time</th><th>Date Added</th></tr>
</thead>
<tbody>
: for count, total_time, song_size, song in newlist
<tr>
: use aa_link song.artist, 'artist', td=True, new_only=True
<td data-value='${count}'>${ctx.format_decimal(count)}</td>
<td data-value='${song_size}'>${ctx.format_size(song_size)}</td>
<td data-value='${total_time}'>${ctx.format_time(total_time)}</td>
<td data-value='${song.addition_time}'>${ctx.time_ago(datetime.datetime.fromtimestamp(song.addition_time))}</td>
</tr>
: end
</tbody>
</table>
</div>
</div>
</div>
: end
: end
| {
"content_hash": "5de64116e8901cebec7175d16f41a254",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 159,
"avg_line_length": 43.42424242424242,
"alnum_prop": 0.5694347522679692,
"repo_name": "bmillham/djrq2",
"id": "9207382008250926fbcec34b9f46ad4e17ef3600",
"size": "1452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/app/djrq/templates/whatsnew.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22622"
},
{
"name": "JavaScript",
"bytes": "59510"
},
{
"name": "Python",
"bytes": "267514"
},
{
"name": "Shell",
"bytes": "1030"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import
# Non-std. lib imports
from PySide.QtGui import QErrorMessage
error = QErrorMessage()
def toolTipText(text):
    """Wrap *text* in <html> tags so Qt renders it as a rich-text tool tip."""
    return '<html>{0}</html>'.format(text)
| {
"content_hash": "9f4a15792f5d15cdf0b9e02dd33acda1",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 64,
"avg_line_length": 25,
"alnum_prop": 0.708,
"repo_name": "jensengrouppsu/rapid",
"id": "30c8d279f3efd4b8025f490f95d2a27cb9aa7f28",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rapid/gui/guicommon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "1752"
},
{
"name": "Python",
"bytes": "2039250"
}
],
"symlink_target": ""
} |
import struct
from ryu.base import app_manager
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib import dpid as dpid_lib
from ryu.lib import stplib
from ryu.lib.mac import haddr_to_str
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
class RyuFurioso(app_manager.RyuApp):
    """Learning L2 switch with STP support that only forwards frames whose
    source MAC is whitelisted in hosts.txt (see check_if_registered below).
    """

    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]
    # Ask Ryu to instantiate the STP library and hand it to us via kwargs.
    _CONTEXTS = {'stplib': stplib.Stp}

    def __init__(self, *args, **kwargs):
        super(RyuFurioso, self).__init__(*args, **kwargs)
        # dpid -> {src mac -> in_port} learning table
        self.mac_to_port = {}
        self.stp = kwargs['stplib']

    def add_flow(self, datapath, in_port, dst, actions):
        """Install a flow on *datapath* matching (in_port, dl_dst) -> actions."""
        ofproto = datapath.ofproto

        # Match only on in_port and destination MAC; wildcard everything else.
        wildcards = ofproto_v1_0.OFPFW_ALL
        wildcards &= ~ofproto_v1_0.OFPFW_IN_PORT
        wildcards &= ~ofproto_v1_0.OFPFW_DL_DST

        match = datapath.ofproto_parser.OFPMatch(
            wildcards, in_port, 0, dst,
            0, 0, 0, 0, 0, 0, 0, 0, 0)

        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=ofproto.OFP_DEFAULT_PRIORITY,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
        datapath.send_msg(mod)

    def delete_flow(self, datapath):
        """Delete every flow on *datapath* (match-all wildcard delete)."""
        ofproto = datapath.ofproto

        wildcards = ofproto_v1_0.OFPFW_ALL
        match = datapath.ofproto_parser.OFPMatch(
            wildcards, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)

        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_DELETE)
        datapath.send_msg(mod)

    @set_ev_cls(stplib.EventPacketIn, MAIN_DISPATCHER)
    def packet_in_handler(self, ev):
        """Handle packet-ins forwarded by the STP library.

        Drops frames from unauthorized source MACs, otherwise behaves as
        a standard learning switch and installs a forwarding flow once
        the destination port is known.
        """
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocol(ethernet.ethernet)

        packet_dst = eth.dst  # mac addr from dst
        packet_src = eth.src  # mac addr from src

        # check_if_registered is the module-level whitelist helper.
        if not check_if_registered(packet_src):  # drop packet if not from authorized host
            print("{} is NOT authorized for communication".format(packet_src))
            return
        else:
            print("{} is authorized for communication".format(packet_src))

        # NOTE(review): buffer() only exists on Python 2 -- this handler
        # cannot run under Python 3 as written.
        dst, src, _eth_type = struct.unpack_from('!6s6sH', buffer(msg.data), 0)

        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})

        self.logger.debug("packet in %s %s %s %s",
                          dpid, haddr_to_str(src), haddr_to_str(dst),
                          msg.in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = msg.in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            self.add_flow(datapath, msg.in_port, dst, actions)

        out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath, buffer_id=msg.buffer_id, in_port=msg.in_port,
            actions=actions)
        datapath.send_msg(out)

    @set_ev_cls(stplib.EventTopologyChange, MAIN_DISPATCHER)
    def _topology_change_handler(self, ev):
        """Flush learned MACs and installed flows after a topology change."""
        dp = ev.dp
        dpid_str = dpid_lib.dpid_to_str(dp.id)
        msg = 'Receive topology change event. Flush MAC table.'
        self.logger.debug("[dpid=%s] %s", dpid_str, msg)

        if dp.id in self.mac_to_port:
            del self.mac_to_port[dp.id]
        self.delete_flow(dp)

    @set_ev_cls(stplib.EventPortStateChange, MAIN_DISPATCHER)
    def _port_state_change_handler(self, ev):
        """Log STP port state transitions for debugging."""
        dpid_str = dpid_lib.dpid_to_str(ev.dp.id)
        of_state = {stplib.PORT_STATE_DISABLE: 'DISABLE',
                    stplib.PORT_STATE_BLOCK: 'BLOCK',
                    stplib.PORT_STATE_LISTEN: 'LISTEN',
                    stplib.PORT_STATE_LEARN: 'LEARN',
                    stplib.PORT_STATE_FORWARD: 'FORWARD'}
        self.logger.debug("[dpid=%s][port=%d] state=%s",
                          dpid_str, ev.port_no, of_state[ev.port_state])
def check_if_registered(mac):
    """Return True when *mac* appears in the hosts.txt whitelist.

    The file is re-read on every call so edits take effect without
    restarting the controller.
    """
    # 'with' guarantees the handle is closed; the original leaked it.
    with open("hosts.txt", "r") as checkfile:
        # substring membership == original count(mac) > 0 check
        return mac in checkfile.read()
| {
"content_hash": "4ff35c88d8b5a848a0165477fcbe2ba4",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 90,
"avg_line_length": 35.796875,
"alnum_prop": 0.6047577477084243,
"repo_name": "ArthurFreitas/Projeto_redes",
"id": "1ba41529957149fac30a8bfd52db709f874efece",
"size": "4582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RYU/hadouken.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "40981"
},
{
"name": "C++",
"bytes": "50107"
},
{
"name": "CMake",
"bytes": "182580"
},
{
"name": "Makefile",
"bytes": "174309"
},
{
"name": "Python",
"bytes": "121799"
},
{
"name": "Shell",
"bytes": "17862"
}
],
"symlink_target": ""
} |
import requests, json
def getSong(artist, title):
    """Search Spotify's public API for tracks matching artist and title.

    Returns the list of matching track items, or a dict with an 'error'
    key when nothing was found.
    """
    query = artist + " " + title
    # Strip characters that would break the hand-built query string.
    for old, new in ((" ", "%20"), ("&", ""), ("'", "")):
        query = query.replace(old, new)
    response = requests.get(
        "https://api.spotify.com/v1/search?q=" + query + "&type=track")
    tracks = json.loads(response.content)['tracks']['items']
    if tracks:
        return tracks
    return {'error': 'No information available'}
def getSongExtended(artist, title, auth):
    """Like getSong, but sends the *auth* headers with the search request."""
    query = artist + " " + title
    # Strip characters that would break the hand-built query string.
    for old, new in ((" ", "%20"), ("&", ""), ("'", "")):
        query = query.replace(old, new)
    response = requests.get(
        "https://api.spotify.com/v1/search?q=" + query + "&type=track",
        headers=auth)
    tracks = json.loads(response.content)['tracks']['items']
    if tracks:
        return tracks
    return {'error': 'No information available'}
def getArtist(artist):
    """Search Spotify for tracks matching *artist* only.

    Returns the list of track items, or a dict with an 'error' key when
    nothing was found.
    """
    query = artist.replace(" ", "%20")
    response = requests.get(
        "https://api.spotify.com/v1/search?q=" + query + "&type=track")
    tracks = json.loads(response.content)['tracks']['items']
    if tracks:
        return tracks
    return {'error': 'No information available'}
def getMe(authHeader):
    """Fetch the authenticated user's Spotify profile as a parsed dict."""
    response = requests.get("https://api.spotify.com/v1/me",
                            headers=authHeader)
    return json.loads(response.content)
| {
"content_hash": "481bd6e36249a704bbff2b53d583c491",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 204,
"avg_line_length": 38.170212765957444,
"alnum_prop": 0.6549609810479375,
"repo_name": "mariaholmberg283/WebbtjansterGruppDiggarn",
"id": "f2e3a7150db4f9ae3d0ed3e2ba15603da95f6fcf",
"size": "1794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spotify_communication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2064"
},
{
"name": "HTML",
"bytes": "16412"
},
{
"name": "Python",
"bytes": "11197"
}
],
"symlink_target": ""
} |
"""Tests for autoreload extension.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import sys
import tempfile
import shutil
import random
import time
import nose.tools as nt
import IPython.testing.tools as tt
from IPython.extensions.autoreload import AutoreloadMagics
from IPython.core.events import EventManager, pre_run_cell
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Test fixture
#-----------------------------------------------------------------------------
def noop(*args, **kwargs):
    """Accept anything, do nothing, return None.

    Stand-in for shell hooks the tests do not need (PEP 8 E731: a named
    lambda assignment is replaced by a def with the same behavior).
    """
    return None
class FakeShell(object):
    """Just-enough fake of an IPython shell for exercising AutoreloadMagics."""

    def __init__(self):
        self.ns = {}
        # BUG FIX: the original passed the *set* {'pre_run_cell', pre_run_cell},
        # which registered the callback object itself as an event name
        # alongside the real 'pre_run_cell' event.  EventManager expects a
        # name -> callback mapping (iterating it yields the event names).
        self.events = EventManager(self, {'pre_run_cell': pre_run_cell})
        self.auto_magics = AutoreloadMagics(shell=self)
        self.events.register('pre_run_cell', self.auto_magics.pre_run_cell)

    # The magics machinery calls these; the tests don't care about them.
    register_magics = set_hook = noop

    def run_code(self, code):
        """Execute *code* in the fake namespace, firing the reload hooks."""
        self.events.trigger('pre_run_cell')
        exec(code, self.ns)
        self.auto_magics.post_execute_hook()

    def push(self, items):
        """Merge *items* into the fake user namespace."""
        self.ns.update(items)

    def magic_autoreload(self, parameter):
        """Equivalent of running %autoreload <parameter>."""
        self.auto_magics.autoreload(parameter)

    def magic_aimport(self, parameter, stream=None):
        """Equivalent of running %aimport <parameter>."""
        self.auto_magics.aimport(parameter, stream=stream)
        self.auto_magics.post_execute_hook()
class Fixture(object):
    """Fixture managing a temporary directory of throwaway module files."""

    test_dir = None
    old_sys_path = None
    filename_chars = "abcdefghijklmopqrstuvwxyz0123456789"

    def setUp(self):
        """Create a temp dir, put it on sys.path, and build a fake shell."""
        self.test_dir = tempfile.mkdtemp()
        self.old_sys_path = list(sys.path)
        sys.path.insert(0, self.test_dir)
        self.shell = FakeShell()

    def tearDown(self):
        """Undo setUp: delete the temp dir and restore sys.path."""
        shutil.rmtree(self.test_dir)
        sys.path = self.old_sys_path

        self.test_dir = None
        self.old_sys_path = None
        self.shell = None

    def get_module(self):
        """Return a (module_name, file_name) pair for a fresh random module."""
        module_name = "tmpmod_" + "".join(
            random.sample(self.filename_chars, 20))
        if module_name in sys.modules:
            del sys.modules[module_name]
        file_name = os.path.join(self.test_dir, module_name + ".py")
        return module_name, file_name

    def write_file(self, filename, content):
        """
        Write a file, and force a timestamp difference of at least one second

        Notes
        -----
        Python's .pyc files record the timestamp of their compilation
        with a time resolution of one second.

        Therefore, we need to force a timestamp difference between .py
        and .pyc, without having the .py file be timestamped in the
        future, and without changing the timestamp of the .pyc file
        (because that is stored in the file).  The only reliable way
        to achieve this seems to be to sleep.
        """
        time.sleep(1.05)  # one second plus epsilon (see note above)
        with open(filename, 'w') as handle:
            handle.write(content)

    def new_module(self, code):
        """Create a brand-new module file containing *code* on disk."""
        mod_name, mod_fn = self.get_module()
        with open(mod_fn, 'w') as handle:
            handle.write(code)
        return mod_name, mod_fn
#-----------------------------------------------------------------------------
# Test automatic reloading
#-----------------------------------------------------------------------------
class TestAutoreload(Fixture):
    """End-to-end tests of %autoreload / %aimport against modules on disk."""

    def _check_smoketest(self, use_aimport=True):
        """
        Functional test for the automatic reloader using either
        '%autoreload 1' or '%autoreload 2'
        """
        mod_name, mod_fn = self.new_module("""
x = 9
z = 123 # this item will be deleted
def foo(y):
    return y + 3
class Baz(object):
    def __init__(self, x):
        self.x = x
    def bar(self, y):
        return self.x + y
    @property
    def quux(self):
        return 42
    def zzz(self):
        '''This method will be deleted below'''
        return 99
class Bar: # old-style class: weakref doesn't work for it on Python < 2.7
    def foo(self):
        return 1
""")
        #
        # Import module, and mark for reloading
        #
        if use_aimport:
            self.shell.magic_autoreload("1")
            self.shell.magic_aimport(mod_name)
            stream = StringIO()
            self.shell.magic_aimport("", stream=stream)
            nt.assert_in(
                ("Modules to reload:\n%s" % mod_name), stream.getvalue())

            with nt.assert_raises(ImportError):
                self.shell.magic_aimport("tmpmod_as318989e89ds")
        else:
            self.shell.magic_autoreload("2")
            self.shell.run_code("import %s" % mod_name)
            stream = StringIO()
            self.shell.magic_aimport("", stream=stream)
            nt.assert_true("Modules to reload:\nall-except-skipped" in
                           stream.getvalue())
        nt.assert_in(mod_name, self.shell.ns)

        mod = sys.modules[mod_name]

        #
        # Test module contents
        #
        # Hold pre-reload references: superreload should patch these in place.
        old_foo = mod.foo
        old_obj = mod.Baz(9)
        old_obj2 = mod.Bar()

        def check_module_contents():
            # Expectations for the ORIGINAL module source.
            nt.assert_equal(mod.x, 9)
            nt.assert_equal(mod.z, 123)

            nt.assert_equal(old_foo(0), 3)
            nt.assert_equal(mod.foo(0), 3)

            obj = mod.Baz(9)
            nt.assert_equal(old_obj.bar(1), 10)
            nt.assert_equal(obj.bar(1), 10)
            nt.assert_equal(obj.quux, 42)
            nt.assert_equal(obj.zzz(), 99)

            obj2 = mod.Bar()
            nt.assert_equal(old_obj2.foo(), 1)
            nt.assert_equal(obj2.foo(), 1)

        check_module_contents()

        #
        # Simulate a failed reload: no reload should occur and exactly
        # one error message should be printed
        #
        self.write_file(mod_fn, """
a syntax error
""")

        with tt.AssertPrints(('[autoreload of %s failed:' % mod_name), channel='stderr'):
            self.shell.run_code("pass")  # trigger reload
        with tt.AssertNotPrints(('[autoreload of %s failed:' % mod_name), channel='stderr'):
            self.shell.run_code("pass")  # trigger another reload
        check_module_contents()

        #
        # Rewrite module (this time reload should succeed)
        #
        self.write_file(mod_fn, """
x = 10
def foo(y):
    return y + 4
class Baz(object):
    def __init__(self, x):
        self.x = x
    def bar(self, y):
        return self.x + y + 1
    @property
    def quux(self):
        return 43
class Bar: # old-style class
    def foo(self):
        return 2
""")

        # Redefined: expectations after the SUCCESSFUL reload; old references
        # must now observe the new behavior ("superreload magic").
        def check_module_contents():
            nt.assert_equal(mod.x, 10)
            nt.assert_false(hasattr(mod, 'z'))

            nt.assert_equal(old_foo(0), 4)  # superreload magic!
            nt.assert_equal(mod.foo(0), 4)

            obj = mod.Baz(9)
            nt.assert_equal(old_obj.bar(1), 11)  # superreload magic!
            nt.assert_equal(obj.bar(1), 11)

            nt.assert_equal(old_obj.quux, 43)
            nt.assert_equal(obj.quux, 43)

            nt.assert_false(hasattr(old_obj, 'zzz'))
            nt.assert_false(hasattr(obj, 'zzz'))

            obj2 = mod.Bar()
            nt.assert_equal(old_obj2.foo(), 2)
            nt.assert_equal(obj2.foo(), 2)

        self.shell.run_code("pass")  # trigger reload
        check_module_contents()

        #
        # Another failure case: deleted file (shouldn't reload)
        #
        os.unlink(mod_fn)

        self.shell.run_code("pass")  # trigger reload
        check_module_contents()

        #
        # Disable autoreload and rewrite module: no reload should occur
        #
        if use_aimport:
            self.shell.magic_aimport("-" + mod_name)
            stream = StringIO()
            self.shell.magic_aimport("", stream=stream)
            nt.assert_true(("Modules to skip:\n%s" % mod_name) in
                           stream.getvalue())

            # This should succeed, although no such module exists
            self.shell.magic_aimport("-tmpmod_as318989e89ds")
        else:
            self.shell.magic_autoreload("0")

        self.write_file(mod_fn, """
x = -99
""")

        self.shell.run_code("pass")  # trigger reload
        self.shell.run_code("pass")
        check_module_contents()

        #
        # Re-enable autoreload: reload should now occur
        #
        if use_aimport:
            self.shell.magic_aimport(mod_name)
        else:
            self.shell.magic_autoreload("")

        self.shell.run_code("pass")  # trigger reload
        nt.assert_equal(mod.x, -99)

    def test_smoketest_aimport(self):
        self._check_smoketest(use_aimport=True)

    def test_smoketest_autoreload(self):
        self._check_smoketest(use_aimport=False)
| {
"content_hash": "40b86be6eca6a0fb6d10775e840f00e6",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 92,
"avg_line_length": 28.69724770642202,
"alnum_prop": 0.5346334185848253,
"repo_name": "mattvonrocketstein/smash",
"id": "3bd7afb0769bd09aca8d4b6fe8a6638384caf434",
"size": "9384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smashlib/ipy3x/extensions/tests/test_autoreload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162188"
},
{
"name": "HTML",
"bytes": "32106"
},
{
"name": "JavaScript",
"bytes": "1615935"
},
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "4934398"
},
{
"name": "Shell",
"bytes": "2990"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from django.core.exceptions import ValidationError
from django.utils import safestring
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.api import neutron
from sahara_dashboard.api import sahara as saharaclient
from sahara_dashboard import utils
LOG = logging.getLogger(__name__)
class Parameter(object):
    """Wrapper for a single plugin config entry from the sahara API.

    Normalizes the raw config dict into attributes the form builders use.
    """

    def __init__(self, config):
        self.name = config['name']
        self.param_type = config['config_type']
        # sahara reports optionality; the form wants "required".
        self.required = not config['is_optional']
        self.description = config.get('description', "No description")
        default = config.get('default_value', None)
        self.default_value = default
        self.initial_value = default
        self.priority = int(config.get('priority', 2))
        self.choices = config.get('config_values', None)
def build_control(parameter):
    """Build the django form field matching a Parameter's config type.

    Supported types: string, int, bool, dropdown.  Falls through (and
    returns None) for any other type.
    """
    widget_attrs = {"priority": parameter.priority,
                    "placeholder": parameter.default_value}
    kind = parameter.param_type
    if kind == "string":
        return forms.CharField(
            widget=forms.TextInput(attrs=widget_attrs),
            label=parameter.name,
            # A default makes the field effectively optional.
            required=(parameter.required and
                      parameter.default_value is None),
            help_text=parameter.description,
            initial=parameter.initial_value)
    elif kind == "int":
        return forms.IntegerField(
            widget=forms.TextInput(attrs=widget_attrs),
            label=parameter.name,
            required=parameter.required,
            help_text=parameter.description,
            initial=parameter.initial_value)
    elif kind == "bool":
        return forms.BooleanField(
            widget=forms.CheckboxInput(attrs=widget_attrs),
            label=parameter.name,
            required=False,
            initial=parameter.initial_value,
            help_text=parameter.description)
    elif kind == "dropdown":
        return forms.ChoiceField(
            widget=forms.Select(attrs=widget_attrs),
            label=parameter.name,
            required=parameter.required,
            choices=parameter.choices,
            help_text=parameter.description)
def _create_step_action(name, title, parameters, advanced_fields=None,
                        service=None):
    """Dynamically build and return a workflow Step class named *name*.

    Each Parameter becomes a form field keyed "CONF:<service>:<name>";
    *advanced_fields* are extra (field_name, field) pairs added as-is.
    """
    class_fields = {}
    contributes_field = ()
    for param in parameters:
        field_name = "CONF:" + service + ":" + param.name
        contributes_field += (field_name,)
        class_fields[field_name] = build_control(param)

    if advanced_fields is not None:
        for ad_field_name, ad_field_value in advanced_fields:
            class_fields[ad_field_name] = ad_field_value

    # Build the Action class first, then wrap it in a Step whose
    # 'contributes' lists every generated field name.
    action_meta = type('Meta', (object, ),
                       dict(help_text_template=("nodegroup_templates/"
                                                "_fields_help.html")))

    class_fields['Meta'] = action_meta
    action = type(str(title),
                  (workflows.Action,),
                  class_fields)

    step_meta = type('Meta', (object,), dict(name=title))
    step = type(str(name),
                (workflows.Step, ),
                dict(name=name,
                     process_name=name,
                     action_class=action,
                     contributes=contributes_field,
                     Meta=step_meta))

    return step
def build_node_group_fields(action, name, template, count, serialized=None):
    """Attach the four standard node-group fields to a workflow action.

    *name*, *template*, *count* and *serialized* are the keys under
    which each field is stored; all but the name are hidden inputs.
    """
    action.fields[name] = forms.CharField(
        widget=forms.TextInput(),
        label=_("Name"))

    action.fields[template] = forms.CharField(
        widget=forms.HiddenInput(),
        label=_("Node group cluster"))

    action.fields[count] = forms.IntegerField(
        widget=forms.HiddenInput(),
        label=_("Count"),
        min_value=0)

    action.fields[serialized] = forms.CharField(
        widget=forms.HiddenInput())
def build_interface_argument_fields(
        action, name, description, mapping_type, location, value_type,
        required, default_value):
    """Attach the form fields describing one job-interface argument.

    The parameters after *action* are the keys under which each control
    is stored in action.fields.
    """
    action.fields[name] = forms.CharField(
        label=_("Name"),
        widget=forms.TextInput(),
        required=True)
    action.fields[description] = forms.CharField(
        label=_("Description"),
        widget=forms.TextInput(),
        required=False)
    # How the argument is passed to the job: positional, config or named.
    action.fields[mapping_type] = forms.ChoiceField(
        label=_("Mapping Type"),
        widget=forms.Select(),
        required=True,
        choices=[("args", _("Positional Argument")),
                 ("configs", _("Configuration Value")),
                 ("params", _("Named Parameter"))])
    action.fields[location] = forms.CharField(
        label=_("Location"),
        widget=forms.TextInput(),
        required=True)
    action.fields[value_type] = forms.ChoiceField(
        label=_("Value Type"),
        widget=forms.Select(),
        required=True,
        choices=[("string", _("String")),
                 ("number", _("Number")),
                 ("data_source", _("Data Source"))])
    action.fields[required] = forms.BooleanField(
        widget=forms.CheckboxInput(),
        label=_("Required"),
        required=False,
        initial=True)
    action.fields[default_value] = forms.CharField(
        label=_("Default Value"),
        widget=forms.TextInput(),
        required=False)
def parse_configs_from_context(context, defaults):
    """Collect "CONF:<service>:<config>" values from a workflow context.

    Returns {service: {config: value}}.  Values that are None or equal
    (by string comparison) to the service default are omitted, though
    the service's dict is still created.
    """
    parsed = {}
    for key, value in context.items():
        key_text = str(key)
        if not key_text.startswith("CONF"):
            continue
        pieces = key_text.split(":")
        service, config = pieces[1], pieces[2]
        service_configs = parsed.setdefault(service, {})
        if value is None:
            continue
        # Skip values the user left at the plugin default.
        if str(defaults[service][config]) == str(value):
            continue
        service_configs[config] = value
    return parsed
def get_security_groups(request, security_group_ids):
    """Resolve security group ids to neutron security group objects.

    Groups that cannot be fetched are represented by a {'name': id}
    placeholder so the caller can still render something.
    """
    security_groups = []
    for group in security_group_ids or []:
        try:
            security_groups.append(neutron.security_group_get(
                request, group))
        except Exception:
            # NOTE(review): eager %-interpolation inside LOG.info defeats
            # lazy logging/translation; left as-is to preserve output.
            LOG.info(_('Unable to retrieve security group %(group)s.') %
                     {'group': group})
            security_groups.append({'name': group})

    return security_groups
def get_plugin_and_hadoop_version(request):
    """Extract (plugin_name, hadoop_version) from *request*.

    Both GET and POST are consulted because callers put the values in
    either; POST wins on conflicts.  Returns (None, None) when no
    plugin name is present.
    """
    plugin_name = None
    hadoop_version = None
    # In some cases request contains valuable info in both GET and POST methods
    params = request.GET.copy()
    params.update(request.POST)
    if params.get("plugin_name"):
        plugin_name = params["plugin_name"]
        hadoop_version = (
            params.get("plugin_version", None) or params["hadoop_version"]
        )
    return plugin_name, hadoop_version
def clean_node_group(node_group):
    """Return a copy of *node_group* stripped for reuse as a template.

    Falsy values and the server-assigned keys (id, created_at,
    updated_at) are dropped.
    """
    cleaned = {key: value for key, value in node_group.items() if value}
    for server_key in ("id", "created_at", "updated_at"):
        cleaned.pop(server_key, None)
    return cleaned
def populate_image_choices(self, request, context, empty_choice=False):
    """Build (id, name) image choices valid for the selected plugin.

    Only images carrying every tag required by the chosen plugin
    version are offered.  *self* is the calling form/action (unused
    directly here); *context* is unused but kept for the caller's
    signature.
    """
    try:
        all_images = saharaclient.image_list(request)
        plugin, hadoop_version = get_plugin_and_hadoop_version(request)
        details = saharaclient.plugin_get_version_details(request,
                                                          plugin,
                                                          hadoop_version)
        choices = [(image.id, image.name) for image in all_images
                   if (set(details.required_image_tags).
                       issubset(set(image.tags)))]
    except Exception:
        exceptions.handle(request,
                          _("Unable to fetch image choices."))
        choices = []
    if empty_choice:
        # Offer an explicit "no image" entry ahead of the real choices.
        choices = [(None, _('No image specified'))] + choices
    if not choices:
        choices.append(("", _("No Images Available")))

    return choices
class PluginAndVersionMixin(object):
    """Form mixin adding linked plugin / version choice fields."""

    def _generate_plugin_version_fields(self, sahara):
        """Create the plugin_name field plus one version field per
        visible plugin; version fields are switched client-side based
        on the selected plugin."""
        plugins = [p for p in sahara.plugins.list()
                   if is_plugin_not_hidden_for_user(p)]
        plugin_choices = [(plugin.name, plugin.title) for plugin in plugins]

        self.fields["plugin_name"] = forms.ChoiceField(
            label=_("Plugin Name"),
            choices=plugin_choices,
            widget=forms.Select(
                attrs={"class": "plugin_name_choice switchable",
                       'data-slug': 'pluginname'}))

        for plugin in plugins:
            versions = [(version, version)
                        for version in get_enabled_versions(plugin)]
            # Newest versions first, using natural version ordering.
            version_choices = sorted(
                versions,
                reverse=True,
                key=lambda v: utils.smart_sort_helper(v[0]))

            field_name = plugin.name + "_version"
            choice_field = forms.ChoiceField(
                label=_("Version"),
                choices=version_choices,
                widget=forms.Select(
                    attrs={"class": "plugin_version_choice switched "
                                    + field_name + "_choice",
                           "data-switch-on": "pluginname",
                           "data-pluginname-%s" % plugin.name: _("Version")})
            )
            self.fields[field_name] = choice_field
class PatchedDynamicWorkflow(workflows.Workflow):
    """Workflow subclass that makes dynamically created steps safe.

    Works around https://bugs.launchpad.net/horizon/+bug/1196717 and a
    related (unfiled) thread-safety issue with dynamic tab creation.
    """

    def _ensure_dynamic_exist(self):
        # Lazily create the container: descendants may register steps
        # from their constructors before ours has run.
        if not hasattr(self, 'dynamic_steps'):
            self.dynamic_steps = []

    def _register_step(self, step):
        # Use that method instead of 'register' to register step.
        # Note that a step could be registered in descendant class constructor
        # only before this class constructor is invoked.
        self._ensure_dynamic_exist()
        self.dynamic_steps.append(step)

    def _order_steps(self):
        # overrides method of Workflow
        self._ensure_dynamic_exist()
        self._registry = {step: step(self) for step in self.dynamic_steps}
        return list(self.default_steps) + self.dynamic_steps
class ServiceParametersWorkflow(PatchedDynamicWorkflow):
    """Base class for Workflows having services tabs with parameters."""

    def _populate_tabs(self, general_parameters, service_parameters):
        # Populates tabs for 'general' and service parameters
        # Also populates defaults and initial values
        self.defaults = dict()

        self._init_step('general', 'General Parameters', general_parameters)

        for service, parameters in service_parameters.items():
            self._init_step(service, service + ' Parameters', parameters)

    def _init_step(self, service, title, parameters):
        """Register one parameters tab and record each param's default.

        Tabs with no parameters are skipped entirely.
        """
        if not parameters:
            return

        self._populate_initial_values(service, parameters)

        step = _create_step_action(service, title=title, parameters=parameters,
                                   service=service)

        self.defaults[service] = dict()
        for param in parameters:
            self.defaults[service][param.name] = param.default_value

        self._register_step(step)

    def _set_configs_to_copy(self, configs):
        # Stash configs (e.g. from a copied template) so the next
        # _populate_tabs call can seed the fields' initial values.
        self.configs_to_copy = configs

    def _populate_initial_values(self, service, parameters):
        """Copy any stashed config values into the Parameters' initials."""
        if not hasattr(self, 'configs_to_copy'):
            return
        configs = self.configs_to_copy

        for param in parameters:
            if (service in configs and
                    param.name in configs[service]):
                param.initial_value = configs[service][param.name]
class StatusFormatMixin(workflows.Workflow):
    """Workflow mixin preferring a stored error description over the
    default completion message."""

    def __init__(self, request, context_seed, entry_point, *args, **kwargs):
        super(StatusFormatMixin, self).__init__(
            request, context_seed, entry_point, *args, **kwargs)

    def format_status_message(self, message):
        """Return self.error_description when set; otherwise *message*
        with the entity name (self.context[self.name_property])
        interpolated via %-formatting."""
        description = getattr(self, 'error_description', None)
        if description:
            return description
        return message % self.context[self.name_property]
class ShareWidget(forms.MultiWidget):
    """Widget rendering a table of shares: per share an enable checkbox,
    a path input and an access-level selector."""

    def __init__(self, choices=()):
        widgets = []
        for choice in choices:
            # Three sub-widgets per share, in a fixed order the other
            # methods rely on: [checkbox, path, access level].
            widgets.append(forms.CheckboxInput(
                attrs={
                    "label": choice[1],
                    "value": choice[0],
                }))
            widgets.append(forms.TextInput())
            widgets.append(forms.Select(
                choices=(("rw", _("Read/Write")), ("ro", _("Read only")))))
        super(ShareWidget, self).__init__(widgets)

    def decompress(self, value):
        """Flatten {share: {id, path, access_level}} into the flat
        sub-widget value list expected by MultiWidget."""
        if value:
            values = []
            for share in value:
                values.append(value[share]["id"])
                values.append(value[share]["path"])
                values.append(value[share]["access_level"])
            return values
        return [None] * len(self.widgets)

    def format_output(self, rendered_widgets):
        """Assemble the rendered sub-widgets into one HTML table,
        three widgets (one share) per row."""
        output = []
        output.append("<table>")
        output.append("<tr><th>Share</th><th>Enabled</th>"
                      "<th>Path</th><th>Permissions</th></tr>")
        for i, widget in enumerate(rendered_widgets):
            # Position of this widget within its share's triple.
            item_widget_index = i % 3
            if item_widget_index == 0:
                # Start of a new share row: emit the share label cell.
                output.append("<tr>")
                output.append(
                    "<td class='col-sm-2 small-padding'>%s</td>" %
                    self.widgets[i].attrs["label"])
            # The last 2 form field td need get a larger size
            if item_widget_index in [1, 2]:
                size = 4
            else:
                size = 2
            output.append("<td class='col-sm-%s small-padding'>" % size
                          + widget + "</td>")
            if item_widget_index == 2:
                output.append("</tr>")
        output.append("</table>")
        return safestring.mark_safe('\n'.join(output))
class MultipleShareChoiceField(forms.MultipleChoiceField):
    """MultipleChoiceField whose value is a list of share definitions
    rather than members of self.choices."""

    def validate(self, value):
        # Only presence and type are checked: individual entries are
        # composed by ShareWidget, not drawn from self.choices.
        if self.required and not value:
            raise ValidationError(
                self.error_messages['required'], code='required')
        if isinstance(value, list):
            return
        raise ValidationError(
            _("The value of shares must be a list of values")
        )
def is_plugin_not_hidden_for_user(plugin):
    """Return True when the plugin should be shown to the user.

    A plugin is visible when it is not labelled hidden and has at
    least one enabled version.
    """
    hidden = plugin.plugin_labels.get('hidden')
    if hidden and hidden['status']:
        return False
    return bool(get_enabled_versions(plugin))
def get_enabled_versions(plugin):
    """List the plugin's versions whose 'enabled' label is on.

    A disabled 'enabled' label on the plugin itself turns off every
    version regardless of per-version labels; labels default to on.
    """
    plugin_enabled = plugin.plugin_labels.get(
        'enabled', {'status': True}).get('status', True)
    if not plugin_enabled:
        return []
    return [version
            for version, labels in plugin.version_labels.items()
            if labels.get('enabled', {'status': True}).get('status', True)]
def is_version_of_plugin_deprecated(plugin, version):
    """Return True when *version* of *plugin* carries an active
    'deprecated' label.

    Unknown versions return False explicitly; the original fell off the
    end of its loop and returned None (falsy, but inconsistent).
    """
    labels = plugin.version_labels.get(version)
    if labels is None:
        return False
    # Label defaults to "not deprecated" when absent.
    if labels.get('deprecated', {'status': False}).get('status', False):
        return True
    return False
| {
"content_hash": "795b6ea03e0def891aa8f6a4354dce51",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 79,
"avg_line_length": 35.20982142857143,
"alnum_prop": 0.5703689615823507,
"repo_name": "openstack/sahara-dashboard",
"id": "0208911862a0166f18320c587f5836f43ed04011",
"size": "16320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sahara_dashboard/content/data_processing/utils/workflow_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "114998"
},
{
"name": "JavaScript",
"bytes": "17106"
},
{
"name": "Python",
"bytes": "615463"
},
{
"name": "SCSS",
"bytes": "307"
},
{
"name": "Shell",
"bytes": "4077"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_evc_mode
short_description: Enable/Disable EVC mode on vCenter
description:
- This module can be used to enable/disable EVC mode on vCenter.
version_added: 2.9
author:
- Michael Tipton (@castawayegr)
notes:
- Tested on vSphere 6.7
requirements:
- "python >= 2.6"
- PyVmomi
options:
datacenter_name:
description:
- The name of the datacenter the cluster belongs to that you want to enable or disable EVC mode on.
required: True
type: str
cluster_name:
description:
- The name of the cluster to enable or disable EVC mode on.
required: True
type: str
evc_mode:
description:
- Required for C(state=present).
- The EVC mode to enable or disable on the cluster. (intel-broadwell, intel-nehalem, intel-merom, etc.).
required: True
type: str
state:
description:
- Add or remove EVC mode.
choices: [absent, present]
default: present
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Enable EVC Mode
vmware_evc_mode:
hostname: "{{ groups['vcsa'][0] }}"
username: "{{ vcenter_username }}"
password: "{{ site_password }}"
datacenter_name: "{{ datacenter_name }}"
cluster_name: "{{ cluster_name }}"
evc_mode: "intel-broadwell"
state: present
delegate_to: localhost
register: enable_evc
- name: Disable EVC Mode
vmware_evc_mode:
hostname: "{{ groups['vcsa'][0] }}"
username: "{{ vcenter_username }}"
password: "{{ site_password }}"
datacenter_name: "{{ datacenter_name }}"
cluster_name: "{{ cluster_name }}"
state: absent
delegate_to: localhost
register: disable_evc
'''
RETURN = """
result:
description: information about performed operation
returned: always
type: str
sample: "EVC Mode for 'intel-broadwell' has been enabled."
"""
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.vmware import (PyVmomi, find_datacenter_by_name, find_cluster_by_name,
vmware_argument_spec, wait_for_task, TaskError)
class VMwareEVC(PyVmomi):
    """State machine for the vmware_evc_mode Ansible module.

    Reads the desired state from the module params and drives the
    cluster's EvcManager to enable, update or disable EVC mode.
    """

    def __init__(self, module):
        super(VMwareEVC, self).__init__(module)
        self.cluster_name = module.params['cluster_name']
        self.evc_mode = module.params['evc_mode']
        self.datacenter_name = module.params['datacenter_name']
        self.desired_state = module.params['state']
        self.datacenter = None
        self.cluster = None

    def process_state(self):
        """
        Manage internal states of evc
        """
        # desired state -> current state -> handler method
        evc_states = {
            'absent': {
                'present': self.state_disable_evc,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'present': self.state_update_evc,
                'absent': self.state_enable_evc,
            }
        }
        current_state = self.check_evc_configuration()
        # Based on the desired_state and the current_state call
        # the appropriate method from the dictionary
        evc_states[self.desired_state][current_state]()

    def check_evc_configuration(self):
        """
        Check evc configuration
        Returns: 'present' if evc is enabled, else 'absent'.
        Side effects: caches datacenter, cluster, evcm, evc_state and
        current_evc_mode on self for the state handlers; fails the
        module when datacenter/cluster/EVC manager cannot be found.
        """
        try:
            self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
            if self.datacenter is None:
                self.module.fail_json(msg="Datacenter '%s' does not exist." % self.datacenter_name)
            self.cluster = self.find_cluster_by_name(cluster_name=self.cluster_name, datacenter_name=self.datacenter)

            if self.cluster is None:
                self.module.fail_json(msg="Cluster '%s' does not exist." % self.cluster_name)

            self.evcm = self.cluster.EvcManager()

            if not self.evcm:
                self.module.fail_json(msg="Unable to get EVC manager for cluster '%s'." % self.cluster_name)
            self.evc_state = self.evcm.evcState
            self.current_evc_mode = self.evc_state.currentEVCModeKey

            if not self.current_evc_mode:
                return 'absent'

            return 'present'
        except Exception as generic_exc:
            self.module.fail_json(msg="Failed to check configuration"
                                  " due to generic exception %s" % to_native(generic_exc))

    def state_exit_unchanged(self):
        """
        Exit without any change
        """
        self.module.exit_json(changed=False, msg="EVC Mode is already disabled on cluster '%s'." % self.cluster_name)

    def state_update_evc(self):
        """
        Update EVC Mode
        """
        changed, result = False, None
        try:
            # Only reconfigure when the mode actually differs; in check
            # mode report 'changed' without touching the cluster.
            if not self.module.check_mode and self.current_evc_mode != self.evc_mode:
                evc_task = self.evcm.ConfigureEvcMode_Task(self.evc_mode)
                changed, result = wait_for_task(evc_task)
            if self.module.check_mode and self.current_evc_mode != self.evc_mode:
                changed, result = True, None
            if self.current_evc_mode == self.evc_mode:
                self.module.exit_json(changed=changed, msg="EVC Mode is already set to '%(evc_mode)s' on '%(cluster_name)s'." % self.params)
            self.module.exit_json(changed=changed, msg="EVC Mode has been updated to '%(evc_mode)s' on '%(cluster_name)s'." % self.params)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to update EVC mode: %s" % to_native(invalid_argument))

    def state_enable_evc(self):
        """
        Enable EVC Mode
        """
        changed, result = False, None
        try:
            if not self.module.check_mode:
                evc_task = self.evcm.ConfigureEvcMode_Task(self.evc_mode)
                changed, result = wait_for_task(evc_task)
            if self.module.check_mode:
                changed, result = True, None
            self.module.exit_json(changed=changed, msg="EVC Mode for '%(evc_mode)s' has been enabled on '%(cluster_name)s'." % self.params)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to enable EVC mode: %s" % to_native(invalid_argument))

    def state_disable_evc(self):
        """
        Disable EVC Mode
        """
        changed, result = False, None
        try:
            if not self.module.check_mode:
                evc_task = self.evcm.DisableEvcMode_Task()
                changed, result = wait_for_task(evc_task)
            if self.module.check_mode:
                changed, result = True, None
            self.module.exit_json(changed=changed, msg="EVC Mode has been disabled on cluster '%s'." % self.cluster_name)
        except TaskError as invalid_argument:
            self.module.fail_json(msg="Failed to disable EVC mode: %s" % to_native(invalid_argument))
def main():
    """Module entry point: build the argument spec, parse parameters and
    hand control to :class:`VMwareEVC`."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        cluster_name=dict(type='str', required=True),
        datacenter_name=dict(type='str', required=True),
        evc_mode=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'present', ['cluster_name', 'datacenter_name', 'evc_mode']]
        ]
    )
    VMwareEVC(module).process_state()


if __name__ == '__main__':
    main()
| {
"content_hash": "466a91ff6f6ec0afbb2ea31d17df7be2",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 140,
"avg_line_length": 35.43805309734513,
"alnum_prop": 0.5962042701960295,
"repo_name": "thaim/ansible",
"id": "935d356e3ad4d304e3ac236523d733ea21935104",
"size": "8204",
"binary": false,
"copies": "30",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/cloud/vmware/vmware_evc_mode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
import sys
import traceback
from flask import Flask, request
import requests
app = Flask(__name__)
def safeget(d, keys, default=None):
    """Walk the nested mapping ``d`` along the path ``keys``.

    Returns ``default`` when any step is missing (KeyError) or the current
    value cannot be indexed with the key (TypeError, e.g. indexing None).
    """
    node = d
    for key in keys:
        try:
            node = node[key]
        except (KeyError, TypeError):
            return default
    return node
@app.route("/", methods=['GET'])
def index(*args, **kwargs):
    """Proxy forecast.io and render selected fields as KEY=VALUE lines.

    Query string parameters:
        apiKey -- forecast.io API key (required for a successful upstream call)
        lat, lon -- coordinates (defaults roughly New York City)
        units, lang -- passed through to forecast.io unchanged
    """
    try:
        apiKey = request.args.get('apiKey', '')
        lat = request.args.get('lat', '40.71')
        lon = request.args.get('lon', '-74')
        units = request.args.get('units', 'us')
        lang = request.args.get('lang', 'en')
        r = requests.get('https://api.forecast.io/forecast/{apiKey}/{lat},{lon}?units={units}&lang={lang}'.format(**locals()))
        weather = r.json()
        current_temp = safeget(weather, ['currently', 'temperature'], 0)
        current_humidity = safeget(weather, ['currently', 'humidity'], 0)
        current_icon = safeget(weather, ['minutely', 'icon'], '')
        current_summary = safeget(weather, ['minutely', 'summary'], '')
        today = {}
        tomorrow = {}
        # Default to an empty list (not None) so the len() calls below cannot
        # raise TypeError when the response carries no daily data.
        daily = safeget(weather, ['daily', 'data'], [])
        if len(daily) > 0:
            today = daily[0]
        if len(daily) > 1:
            tomorrow = daily[1]
        today_max_temp = safeget(today, ['temperatureMax'], 0)
        today_min_temp = safeget(today, ['temperatureMin'], 0)
        today_icon = safeget(today, ['icon'], '')
        today_summary = safeget(today, ['summary'], '')
        tomorrow_max_temp = safeget(tomorrow, ['temperatureMax'], 0)
        tomorrow_min_temp = safeget(tomorrow, ['temperatureMin'], 0)
        tomorrow_icon = safeget(tomorrow, ['icon'], '')
        tomorrow_summary = safeget(tomorrow, ['summary'], '')
        body = ("CURRENT_TEMP={current_temp}\n" +
                "CURRENT_HUMIDITY={current_humidity}\n" +
                "CURRENT_ICON={current_icon}\n" +
                "CURRENT_SUMMARY={current_summary}\n" +
                "MAX_TEMP_TODAY={today_max_temp}\n" +
                "MIN_TEMP_TODAY={today_min_temp}\n" +
                "ICON_TODAY={today_icon}\n" +
                "SUMMARY_TODAY={today_summary}\n" +
                "MAX_TEMP_TOMORROW={tomorrow_max_temp}\n" +
                "ICON_TOMORROW={tomorrow_icon}\n" +
                "MIN_TEMP_TOMORROW={tomorrow_min_temp}\n" +
                "SUMMARY_TOMORROW={tomorrow_summary}\n"
                ).format(**locals())
        # print() as a function works under both Python 2 and Python 3
        # (the previous "print body" statement was Python-2-only syntax).
        print(body)
        return body
    except Exception:
        # Log and return a friendly message, but no longer swallow
        # SystemExit/KeyboardInterrupt like the previous bare except did.
        traceback.print_exc()
        return "Uh, unexpected error"
if __name__ == "__main__":
    # Development server; binds all interfaces so the endpoint is reachable
    # from other hosts on the network.
    app.run(host='0.0.0.0')
| {
"content_hash": "5969a43d44c7e9cfab990b925467d3f9",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 120,
"avg_line_length": 29.87837837837838,
"alnum_prop": 0.6395296246042514,
"repo_name": "markwal/TrivialForecastIoFlask",
"id": "7dbf9a9f5cdc633635abbf4a1dba73d9715471d1",
"size": "2211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "weather.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2211"
},
{
"name": "Shell",
"bytes": "2927"
}
],
"symlink_target": ""
} |
import csv
from scripts.base import QcatApiMixin
class QcatDataCsv(QcatApiMixin):
    """
    Store QCAT API data as CSV.

    Rows are dicts keyed by attribute name; the CSV dialect is fixed by
    :meth:`_get_csv_options` (semicolon-delimited, non-numeric quoting).
    """

    def check_local_questionnaires(self, local_questionnaires):
        """Sanity-check previously stored questionnaires against the
        configured attribute list (no-op when the list is empty)."""
        if local_questionnaires:
            # One row is enough: all rows share the same header/columns.
            self._check_attribute_length(local_questionnaires[0])

    def read_data(self) -> list:
        """Read a local CSV file.

        Returns a list of row dicts; an empty list when no file exists yet.
        """
        output_file = self.get_output_file_path()
        try:
            with open(output_file, newline='', encoding='utf-8') as csv_file:
                reader = csv.DictReader(csv_file, **self._get_csv_options())
                rows = list(reader)
        except FileNotFoundError:
            self.log('No existing attribute file found.')
            return []
        self.log(
            'Found existing attribute file "{output_file}" with {len_rows}'
            ' entries.'.format(
                output_file=output_file, len_rows=len(rows)))
        return rows

    def write_data(self, data: list) -> None:
        """Write a local CSV file from a list of row dicts.

        Column order is taken from the first row; an empty input produces
        a file with no header columns.
        """
        try:
            fieldnames = data[0].keys()
        except IndexError:
            fieldnames = []
        output_file = self.get_output_file_path()
        with open(output_file, 'w', newline='', encoding='utf-8') as csv_file:
            writer = csv.DictWriter(
                csv_file, fieldnames=fieldnames, **self._get_csv_options())
            writer.writeheader()
            writer.writerows(data)
        print('Output file "{output_file}" written.'.format(
            output_file=output_file))

    @staticmethod
    def _get_csv_options() -> dict:
        """Options for CSV writing and reading (shared so both sides agree)."""
        return {'delimiter': ';', 'quoting': csv.QUOTE_NONNUMERIC}

    def _check_attribute_length(self, local_questionnaire: dict):
        """
        Small sanity check: if the number of attributes in the local file
        does not match those in the configuration, report an error.
        """
        # Compute the expected column count once instead of twice.
        qcat_attributes_length = len(self._get_basic_attributes({})) + \
            len(self.config.qcat_attributes)
        if qcat_attributes_length != len(local_questionnaire):
            self.error(
                'Number of attributes ({len_local_questionnaires}) in the local'
                ' file "{output_file_path}" does not match number of '
                'qcat_attributes ({qcat_attributes_length}) set in the '
                'configuration file. Please adjust configuration or delete '
                'previous output file.'.format(
                    len_local_questionnaires=len(local_questionnaire),
                    output_file_path=self.get_output_file_path(),
                    qcat_attributes_length=qcat_attributes_length
                ))
| {
"content_hash": "5c7fa0116dd288d6156a9864fd48e2f2",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 80,
"avg_line_length": 39.93150684931507,
"alnum_prop": 0.5588336192109777,
"repo_name": "CDE-UNIBE/qcat-api-scripts",
"id": "deb214fe1ef7b41d27bef7bf48d39464f3ff7b90",
"size": "2915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/data_csv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "569475"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import os
import shutil
from typing import Sequence
from airflow.compat.functools import cached_property
from airflow.exceptions import AirflowException, AirflowSkipException
from airflow.hooks.subprocess import SubprocessHook
from airflow.models.baseoperator import BaseOperator
from airflow.utils.context import Context
from airflow.utils.operator_helpers import context_to_airflow_vars
class BashOperator(BaseOperator):
    r"""
    Execute a Bash script, command or set of commands.
    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BashOperator`
    If BaseOperator.do_xcom_push is True, the last line written to stdout
    will also be pushed to an XCom when the bash command completes
    :param bash_command: The command, set of commands or reference to a
        bash script (must be '.sh') to be executed. (templated)
    :param env: If env is not None, it must be a dict that defines the
        environment variables for the new process; these are used instead
        of inheriting the current process environment, which is the default
        behavior. (templated)
    :param append_env: If False(default) uses the environment variables passed in env params
        and does not inherit the current process environment. If True, inherits the environment variables
        from current passes and then environment variable passed by the user will either update the existing
        inherited environment variables or the new variables gets appended to it
    :param output_encoding: Output encoding of bash command
    :param skip_exit_code: If task exits with this exit code, leave the task
        in ``skipped`` state (default: 99). If set to ``None``, any non-zero
        exit code will be treated as a failure.
    :param cwd: Working directory to execute the command in.
        If None (default), the command is run in a temporary directory.
    Airflow will evaluate the exit code of the bash command. In general, a non-zero exit code will result in
    task failure and zero will result in task success. Exit code ``99`` (or another set in ``skip_exit_code``)
    will throw an :class:`airflow.exceptions.AirflowSkipException`, which will leave the task in ``skipped``
    state. You can have all non-zero exit codes be treated as a failure by setting ``skip_exit_code=None``.
    .. list-table::
       :widths: 25 25
       :header-rows: 1
       * - Exit code
         - Behavior
       * - 0
         - success
       * - `skip_exit_code` (default: 99)
         - raise :class:`airflow.exceptions.AirflowSkipException`
       * - otherwise
         - raise :class:`airflow.exceptions.AirflowException`
    .. note::
        Airflow will not recognize a non-zero exit code unless the whole shell exit with a non-zero exit
        code.  This can be an issue if the non-zero exit arises from a sub-command. The easiest way of
        addressing this is to prefix the command with ``set -e;``
        Example:
    .. code-block:: python
        bash_command = "set -e; python3 script.py '{{ next_execution_date }}'"
    .. note::
        Add a space after the script name when directly calling a ``.sh`` script with the
        ``bash_command`` argument -- for example ``bash_command="my_script.sh "``.  This
        is because Airflow tries to apply load this file and process it as a Jinja template to
        it ends with ``.sh``, which will likely not be what most users want.
    .. warning::
        Care should be taken with "user" input or when using Jinja templates in the
        ``bash_command``, as this bash operator does not perform any escaping or
        sanitization of the command.
        This applies mostly to using "dag_run" conf, as that can be submitted via
        users in the Web UI. Most of the default template variables are not at
        risk.
    For example, do **not** do this:
    .. code-block:: python
        bash_task = BashOperator(
            task_id="bash_task",
            bash_command='echo "Here is the message: \'{{ dag_run.conf["message"] if dag_run else "" }}\'"',
        )
    Instead, you should pass this via the ``env`` kwarg and use double-quotes
    inside the bash_command, as below:
    .. code-block:: python
        bash_task = BashOperator(
            task_id="bash_task",
            bash_command="echo \"here is the message: '$message'\"",
            env={"message": '{{ dag_run.conf["message"] if dag_run else "" }}'},
        )
    """
    # Fields rendered by the Jinja templating engine before execution.
    template_fields: Sequence[str] = ("bash_command", "env")
    template_fields_renderers = {"bash_command": "bash", "env": "json"}
    # Files with these extensions passed as bash_command are loaded and
    # templated (see the note about trailing spaces in the class docstring).
    template_ext: Sequence[str] = (
        ".sh",
        ".bash",
    )
    ui_color = "#f0ede4"
    def __init__(
        self,
        *,
        bash_command: str,
        env: dict[str, str] | None = None,
        append_env: bool = False,
        output_encoding: str = "utf-8",
        skip_exit_code: int = 99,
        cwd: str | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.bash_command = bash_command
        self.env = env
        self.output_encoding = output_encoding
        self.skip_exit_code = skip_exit_code
        self.cwd = cwd
        self.append_env = append_env
    @cached_property
    def subprocess_hook(self):
        """Returns hook for running the bash command"""
        return SubprocessHook()
    def get_env(self, context):
        """Builds the set of environment variables to be exposed for the bash command"""
        system_env = os.environ.copy()
        env = self.env
        if env is None:
            # No explicit env: inherit the full current process environment.
            env = system_env
        else:
            if self.append_env:
                # Explicit env layered on top of the inherited environment;
                # user-supplied keys win over inherited ones.
                system_env.update(env)
                env = system_env
        # Airflow context variables (dag/task ids etc.) are always exposed.
        airflow_context_vars = context_to_airflow_vars(context, in_env_var_format=True)
        self.log.debug(
            "Exporting the following env vars:\n%s",
            "\n".join(f"{k}={v}" for k, v in airflow_context_vars.items()),
        )
        env.update(airflow_context_vars)
        return env
    def execute(self, context: Context):
        """Run the bash command in a subprocess and return its output,
        mapping exit codes to task states as described in the class docstring."""
        bash_path = shutil.which("bash") or "bash"
        # Fail fast on a bad working directory before spawning the subprocess.
        if self.cwd is not None:
            if not os.path.exists(self.cwd):
                raise AirflowException(f"Can not find the cwd: {self.cwd}")
            if not os.path.isdir(self.cwd):
                raise AirflowException(f"The cwd {self.cwd} must be a directory")
        env = self.get_env(context)
        result = self.subprocess_hook.run_command(
            command=[bash_path, "-c", self.bash_command],
            env=env,
            output_encoding=self.output_encoding,
            cwd=self.cwd,
        )
        # skip_exit_code -> skipped task; any other non-zero code -> failure.
        if self.skip_exit_code is not None and result.exit_code == self.skip_exit_code:
            raise AirflowSkipException(f"Bash command returned exit code {self.skip_exit_code}. Skipping.")
        elif result.exit_code != 0:
            raise AirflowException(
                f"Bash command failed. The command returned a non-zero exit code {result.exit_code}."
            )
        return result.output
    def on_kill(self) -> None:
        # Forward task kill to the running subprocess via SIGTERM.
        self.subprocess_hook.send_sigterm()
| {
"content_hash": "81ca70ef4e41f56b39a2326f8a2c00c4",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 110,
"avg_line_length": 39.3027027027027,
"alnum_prop": 0.630999862467336,
"repo_name": "apache/airflow",
"id": "d50733bafa33ff10ce114763a3f08f03a717ecf3",
"size": "8058",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/operators/bash.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
from GracePlot import *
import math
p = GracePlot()  # A grace session opens
l1 = Line(type=lines.none)
# Use list comprehensions instead of map() so the sequences are real lists
# under both Python 2 and Python 3: in Python 3, map() returns a one-shot
# iterator, and x1 is consumed twice below (for y1/y2 and again for x data).
x1 = [i / 10. for i in range(0, 100)]
y1 = [math.sin(v) for v in x1]
y2 = [math.cos(v) for v in x1]
# sin as red circles, cos as blue circles, no connecting line.
d2 = Data(x=x1, y=y1,
          symbol=Symbol(symbol=symbols.circle, fillcolor=colors.red),
          line=l1)
d3 = Data(x=x1, y=y2,
          symbol=Symbol(symbol=symbols.circle, fillcolor=colors.blue),
          line=l1)
g = p[0]
g.plot([d2, d3])
# Dashed major / dotted minor blue gridlines on both axes.
g.xaxis(label=Label('X axis', font=5, charsize=1.5),
        tick=Tick(majorgrid=True, majorlinestyle=lines.dashed, majorcolor=colors.blue,
                  minorgrid=True, minorlinestyle=lines.dotted, minorcolor=colors.blue))
g.yaxis(tick=Tick(majorgrid=True, majorlinestyle=lines.dashed, majorcolor=colors.blue,
                  minorgrid=True, minorlinestyle=lines.dotted, minorcolor=colors.blue))
| {
"content_hash": "34118d3c46c58a265427e6a3b00d902e",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 85,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.6992574257425742,
"repo_name": "rhambach/EELcalc",
"id": "0ee5cea8cea489faf6b3b1f837c0261249bffa06",
"size": "808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "external/GracePlot/grace_example2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PostScript",
"bytes": "1041892"
},
{
"name": "Python",
"bytes": "44585"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: rewrites the ``choices`` lists of the
    # ``area_name`` CharFields on both area models (presumably extending the
    # option set -- confirm against migration 0008). Choices only affect
    # validation/forms, so no actual column change is made.
    dependencies = [
        ('bus', '0008_auto_20170314_1412'),
    ]
    operations = [
        migrations.AlterField(
            model_name='busdestinationarea',
            name='area_name',
            field=models.CharField(choices=[('MBARARA', 'Mbarara'), ('GULU', 'Gulu'), ('MASAKA', 'Masaka'), ('LIRA', 'Lira'), ('FORTPORTAL', 'FortPortal'), ('JINJA', 'Jinja'), ('KABALE', 'Kabale'), ('PAKWACH', 'Pakwach'), ('Gayaza', 'Gayaza'), ('Kampala', 'Kampala')], max_length=70),
        ),
        migrations.AlterField(
            model_name='buspickarea',
            name='area_name',
            field=models.CharField(choices=[('MBARARA', 'Mbarara'), ('GULU', 'Gulu'), ('MASAKA', 'Masaka'), ('LIRA', 'Lira'), ('FORTPORTAL', 'FortPortal'), ('JINJA', 'Jinja'), ('KABALE', 'Kabale'), ('PAKWACH', 'Pakwach'), ('Gayaza', 'Gayaza'), ('Kampala', 'Kampala')], max_length=70),
        ),
    ]
| {
"content_hash": "6b4f9c954e3ee35c9bb7651baaa2d3c9",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 284,
"avg_line_length": 44.26086956521739,
"alnum_prop": 0.5677799607072691,
"repo_name": "warlock57/bus_reservation",
"id": "17fee95496fd3081f551e1f9448b110f404ac294",
"size": "1091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bus/migrations/0009_auto_20170314_1432.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "588350"
},
{
"name": "HTML",
"bytes": "109044"
},
{
"name": "JavaScript",
"bytes": "411869"
},
{
"name": "Python",
"bytes": "78652"
}
],
"symlink_target": ""
} |
'''
Paste a small image into a big one.
-----------------------------------------------------------
(c) 2013 Allegra Via and Kristian Rother
Licensed under the conditions of the Python License
This code appears in section 18.4.1 of the book
"Managing Biological Data with Python".
-----------------------------------------------------------
'''
from PIL import Image
image = Image.open('color.png', 'r')  # base (background) image
label = Image.open('label.png', 'r')  # small image to paste on top
# Paste the label with its top-left corner at pixel (40, 460).
image.paste(label, (40, 460))
image.save('combined.png')
| {
"content_hash": "47cc12fdcf52d467d208e34e7cc42359",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 59,
"avg_line_length": 27.105263157894736,
"alnum_prop": 0.5359223300970873,
"repo_name": "raymonwu/Managing_Your_Biological_Data_with_Python_3",
"id": "9dc847ce2ea0e8f7cc390992f58697816e77e8f1",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "18-image_manipulation/combine_two_images.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "96353"
},
{
"name": "Jupyter Notebook",
"bytes": "3309089"
},
{
"name": "Python",
"bytes": "103196"
}
],
"symlink_target": ""
} |
"""Models for API objects"""
from datetime import datetime
import re
import ticketpy
def _assign_links(obj, json_obj, base_url=None):
"""Assigns ``links`` attribute to an object from JSON"""
# Normal link strucutre is {link_name: {'href': url}},
# but some responses also have lists of other models.
# API occasionally returns bad URLs (with {&sort} and similar)
json_links = json_obj.get('_links')
if not json_links:
obj.links = {}
else:
obj_links = {}
for k, v in json_links.items():
if 'href' in v:
href = re.sub("({.+})", "", v['href'])
if base_url:
href = "{}{}".format(base_url, href)
obj_links[k] = href
else:
obj_links[k] = v
obj.links = obj_links
class Page(list):
    """One page of API search results; behaves as a list of model objects."""

    def __init__(self, number=None, size=None, total_elements=None,
                 total_pages=None):
        super().__init__([])
        self.number = number
        self.size = size
        self.total_elements = total_elements
        self.total_pages = total_pages

    @staticmethod
    def from_json(json_obj):
        """Instantiate and return a Page(list) from a JSON response."""
        page = Page()
        page.json = json_obj
        _assign_links(page, json_obj, ticketpy.ApiClient.root_url)
        paging = json_obj['page']
        page.number = paging['number']
        page.size = paging['size']
        page.total_pages = paging['totalPages']
        page.total_elements = paging['totalElements']
        embedded = json_obj.get('_embedded')
        if embedded:
            # Map embedded keys to model classes; unknown keys are ignored.
            model_map = {
                'events': Event,
                'venues': Venue,
                'attractions': Attraction,
                'classifications': Classification,
            }
            for key, items in embedded.items():
                model_cls = model_map.get(key)
                if model_cls is not None:
                    page.extend(model_cls.from_json(item) for item in items)
        return page

    def __str__(self):
        return ("Page {}/{}, Size: {}, Total elements: {}"
                .format(self.number, self.total_pages, self.size,
                        self.total_elements))
class Event:
    """Ticketmaster event
    The JSON returned from the Discovery API (at least, as far as
    what's being used here) looks like:
    .. code-block:: json
        {
            "name": "Event name",
            "dates": {
                "start": {
                    "localDate": "2019-04-01",
                    "localTime": "2019-04-01T23:00:00Z"
                },
                "status": {
                    "code": "onsale"
                }
            },
            "classifications": [
                {
                    "genre": {
                        "name": "Rock"
                    }
                },
                {
                    "genre": {
                        "name": "Funk"
                    }
                }
            ],
            "priceRanges": [
                {
                    "min": 10,
                    "max": 25
                }
            ],
            "_embedded": {
                "venues": [
                    {
                        "name": "The Tabernacle"
                    }
                ]
            }
        }
    """
    def __init__(self, event_id=None, name=None, start_date=None,
                 start_time=None, status=None, price_ranges=None,
                 venues=None, utc_datetime=None, classifications=None,
                 links=None):
        self.id = event_id
        self.name = name
        #: **Local** start date (*YYYY-MM-DD*)
        self.local_start_date = start_date
        #: **Local** start time (*HH:MM:SS*)
        self.local_start_time = start_time
        #: Sale status (such as *Cancelled, Offsale...*)
        self.status = status
        self.classifications = classifications
        self.price_ranges = price_ranges
        self.venues = venues
        self.links = links
        # Backing field for the utc_datetime property (name-mangled).
        self.__utc_datetime = None
        if utc_datetime is not None:
            # Route through the property setter so the string is parsed.
            self.utc_datetime = utc_datetime
    @property
    def utc_datetime(self):
        """Start date/time in UTC (*YYYY-MM-DDTHH:MM:SSZ*)"""
        return self.__utc_datetime
    @utc_datetime.setter
    def utc_datetime(self, utc_datetime):
        # Accepts an API timestamp string; falsy input clears the value.
        if not utc_datetime:
            self.__utc_datetime = None
        else:
            ts_format = "%Y-%m-%dT%H:%M:%SZ"
            self.__utc_datetime = datetime.strptime(utc_datetime, ts_format)
    @staticmethod
    def from_json(json_event):
        """Creates an ``Event`` from API's JSON response"""
        e = Event()
        e.json = json_event
        e.id = json_event.get('id')
        e.name = json_event.get('name')
        # Nested date fields default to empty dicts so missing sections
        # simply leave the attributes as None.
        dates = json_event.get('dates', {})
        start_dates = dates.get('start', {})
        e.local_start_date = start_dates.get('localDate')
        e.local_start_time = start_dates.get('localTime')
        e.utc_datetime = start_dates.get('dateTime')
        status = dates.get('status', {})
        e.status = status.get('code')
        if 'classifications' in json_event:
            e.classifications = [EventClassification.from_json(cl)
                                 for cl in json_event['classifications']]
        # Only keep the min/max keys of each price range entry.
        price_ranges = []
        if 'priceRanges' in json_event:
            for pr in json_event['priceRanges']:
                pr_dict = {}
                if 'min' in pr:
                    pr_dict['min'] = pr['min']
                if 'max' in pr:
                    pr_dict['max'] = pr['max']
                price_ranges.append(pr_dict)
        e.price_ranges = price_ranges
        venues = []
        if 'venues' in json_event.get('_embedded', {}):
            for v in json_event['_embedded']['venues']:
                venues.append(Venue.from_json(v))
        e.venues = venues
        _assign_links(e, json_event)
        return e
    def __str__(self):
        # Template fields are filled from instance attributes; the mangled
        # utc_datetime backing field is simply ignored by format().
        tmpl = ("Event: {name}\n"
                "Venues: {venues}\n"
                "Start date: {local_start_date}\n"
                "Start time: {local_start_time}\n"
                "Price ranges: {price_ranges}\n"
                "Status: {status}\n"
                "Classifications: {classifications!s}\n")
        return tmpl.format(**self.__dict__)
class Venue:
    """A Ticketmaster venue
    The JSON returned from the Discovery API looks something like this
    (*edited for brevity*):
    .. code-block:: json
        {
            "id": "KovZpaFEZe",
            "name": "The Tabernacle",
            "url": "http://www.ticketmaster.com/venue/115031",
            "timezone": "America/New_York",
            "address": {
                "line1": "152 Luckie Street"
            },
            "city": {
                "name": "Atlanta"
            },
            "postalCode": "30303",
            "state": {
                "stateCode": "GA",
                "name": "Georgia"
            },
            "country": {
                "name": "United States Of America",
                "countryCode": "US"
            },
            "location": {
                "latitude": "33.758688",
                "longitude": "-84.391449"
            },
            "social": {
                "twitter": {
                    "handle": "@TabernacleATL"
                }
            },
            "markets": [
                {
                    "id": "10"
                }
            ]
        }
    """
    def __init__(self, name=None, address=None, city=None, state_code=None,
                 postal_code=None, latitude=None, longitude=None,
                 markets=None, url=None, box_office_info=None,
                 dmas=None, general_info=None, venue_id=None,
                 social=None, timezone=None, images=None,
                 parking_detail=None, accessible_seating_detail=None,
                 links=None):
        self.name = name
        self.id = venue_id
        self.address = address
        self.postal_code = postal_code
        self.city = city
        #: State code (ex: 'GA' not 'Georgia')
        self.state_code = state_code
        self.latitude = latitude
        self.longitude = longitude
        self.timezone = timezone
        self.url = url
        self.box_office_info = box_office_info
        self.dmas = dmas
        self.markets = markets
        self.general_info = general_info
        self.social = social
        self.images = images
        self.parking_detail = parking_detail
        self.accessible_seating_detail = accessible_seating_detail
        self.links = links
    @property
    def location(self):
        """Location-based data (full address, lat/lon, timezone"""
        return {
            'address': self.address,
            'postal_code': self.postal_code,
            'city': self.city,
            'state_code': self.state_code,
            'timezone': self.timezone,
            'latitude': self.latitude,
            'longitude': self.longitude
        }
    @staticmethod
    def from_json(json_venue):
        """Returns a ``Venue`` object from JSON"""
        v = Venue()
        v.json = json_venue
        # Flat keys map straight to attributes; absent keys stay None.
        v.id = json_venue.get('id')
        v.name = json_venue.get('name')
        v.url = json_venue.get('url')
        v.postal_code = json_venue.get('postalCode')
        v.general_info = json_venue.get('generalInfo')
        v.box_office_info = json_venue.get('boxOfficeInfo')
        v.dmas = json_venue.get('dmas')
        v.social = json_venue.get('social')
        v.timezone = json_venue.get('timezone')
        v.images = json_venue.get('images')
        v.parking_detail = json_venue.get('parkingDetail')
        v.accessible_seating_detail = json_venue.get('accessibleSeatingDetail')
        # Nested structures are unpacked only when present.
        if 'markets' in json_venue:
            v.markets = [m.get('id') for m in json_venue.get('markets')]
        if 'city' in json_venue:
            v.city = json_venue['city'].get('name')
        if 'address' in json_venue:
            v.address = json_venue['address'].get('line1')
        if 'location' in json_venue:
            v.latitude = json_venue['location'].get('latitude')
            v.longitude = json_venue['location'].get('longitude')
        if 'state' in json_venue:
            v.state_code = json_venue['state'].get('stateCode')
        _assign_links(v, json_venue)
        return v
    def __str__(self):
        return ("{name} at {address} in "
                "{city} {state_code}").format(**self.__dict__)
class Attraction:
    """Attraction (performer, team, exhibition...) from the Discovery API."""
    def __init__(self, attraction_id=None, attraction_name=None, url=None,
                 classifications=None, images=None, test=None, links=None):
        self.id = attraction_id
        self.name = attraction_name
        self.url = url
        self.classifications = classifications
        self.images = images
        self.test = test
        self.links = links

    @staticmethod
    def from_json(json_obj):
        """Convert JSON object to ``Attraction`` object"""
        att = Attraction()
        att.json = json_obj
        att.id = json_obj.get('id')
        att.name = json_obj.get('name')
        att.url = json_obj.get('url')
        att.test = json_obj.get('test')
        att.images = json_obj.get('images')
        # Guard against a missing/null 'classifications' key: iterating the
        # bare .get() result raised TypeError (sibling models guard this too).
        classifications = json_obj.get('classifications') or []
        att.classifications = [
            Classification.from_json(cl) for cl in classifications
        ]
        _assign_links(att, json_obj)
        return att

    def __str__(self):
        return str(self.name) if self.name is not None else 'Unknown'
class Classification:
    """Classification object (segment/genre/sub-genre).

    For the structure returned by ``EventSearch``, see
    ``EventClassification``.
    """
    def __init__(self, segment=None, classification_type=None, subtype=None,
                 primary=None, links=None):
        self.segment = segment
        self.type = classification_type
        self.subtype = subtype
        self.primary = primary
        self.links = links

    @staticmethod
    def from_json(json_obj):
        """Build and return a ``Classification`` from its JSON form."""
        cl = Classification(primary=json_obj.get('primary'))
        cl.json = json_obj
        if 'segment' in json_obj:
            cl.segment = Segment.from_json(json_obj['segment'])
        if 'type' in json_obj:
            type_json = json_obj['type']
            cl.type = ClassificationType(type_json['id'], type_json['name'])
        if 'subType' in json_obj:
            subtype_json = json_obj['subType']
            cl.subtype = ClassificationSubType(subtype_json['id'],
                                               subtype_json['name'])
        _assign_links(cl, json_obj)
        return cl

    def __str__(self):
        return str(self.type)
class EventClassification:
    """Classification as it appears inside event search results.

    See ``Classification()`` for results from classification searches.
    """
    def __init__(self, genre=None, subgenre=None, segment=None,
                 classification_type=None, classification_subtype=None,
                 primary=None, links=None):
        self.genre = genre
        self.subgenre = subgenre
        self.segment = segment
        self.type = classification_type
        self.subtype = classification_subtype
        self.primary = primary
        self.links = links

    @staticmethod
    def from_json(json_obj):
        """Build and return an ``EventClassification`` from JSON."""
        ec = EventClassification(primary=json_obj.get('primary'))
        ec.json = json_obj
        if json_obj.get('segment'):
            ec.segment = Segment.from_json(json_obj['segment'])
        if json_obj.get('genre'):
            ec.genre = Genre.from_json(json_obj['genre'])
        if json_obj.get('subGenre'):
            ec.subgenre = SubGenre.from_json(json_obj['subGenre'])
        type_json = json_obj.get('type')
        if type_json:
            ec.type = ClassificationType(type_json['id'], type_json['name'])
        subtype_json = json_obj.get('subType')
        if subtype_json:
            ec.subtype = ClassificationSubType(subtype_json['id'],
                                               subtype_json['name'])
        _assign_links(ec, json_obj)
        return ec

    def __str__(self):
        return ("Segment: {} / Genre: {} / Subgenre: {} / Type: {} / "
                "Subtype: {}".format(self.segment, self.genre, self.subgenre,
                                     self.type, self.subtype))
class ClassificationType:
    """Type level of a classification (id/name plus optional subtypes)."""
    def __init__(self, type_id=None, type_name=None, subtypes=None):
        self.id = type_id
        self.name = type_name
        self.subtypes = subtypes

    def __str__(self):
        if self.name is None:
            return 'Unknown'
        return self.name
class ClassificationSubType:
    """Sub-type level of a classification (id/name pair)."""
    def __init__(self, type_id=None, type_name=None):
        self.id = type_id
        self.name = type_name

    def __str__(self):
        if self.name is None:
            return 'Unknown'
        return self.name
class Segment:
    """Top-level classification segment; may contain a list of genres."""
    def __init__(self, segment_id=None, segment_name=None, genres=None,
                 links=None):
        self.id = segment_id
        self.name = segment_name
        self.genres = genres
        self.links = links

    @staticmethod
    def from_json(json_obj):
        """Create and return a ``Segment`` from JSON"""
        seg = Segment(segment_id=json_obj['id'],
                      segment_name=json_obj.get('name'))
        seg.json = json_obj
        if '_embedded' in json_obj:
            seg.genres = [Genre.from_json(g)
                          for g in json_obj['_embedded']['genres']]
        _assign_links(seg, json_obj)
        return seg

    def __str__(self):
        return 'Unknown' if self.name is None else self.name
class Genre:
    """Genre within a segment; may contain a list of subgenres."""
    def __init__(self, genre_id=None, genre_name=None, subgenres=None,
                 links=None):
        self.id = genre_id
        self.name = genre_name
        self.subgenres = subgenres
        self.links = links

    @staticmethod
    def from_json(json_obj):
        """Build and return a ``Genre`` (with embedded subgenres) from JSON."""
        genre = Genre(genre_id=json_obj.get('id'),
                      genre_name=json_obj.get('name'))
        genre.json = json_obj
        if '_embedded' in json_obj:
            genre.subgenres = [
                SubGenre.from_json(sg)
                for sg in json_obj['_embedded']['subgenres']
            ]
        _assign_links(genre, json_obj)
        return genre

    def __str__(self):
        return 'Unknown' if self.name is None else self.name
class SubGenre:
    """Sub-genre within a genre (id/name pair)."""
    def __init__(self, subgenre_id=None, subgenre_name=None, links=None):
        self.id = subgenre_id
        self.name = subgenre_name
        self.links = links

    @staticmethod
    def from_json(json_obj):
        """Build and return a ``SubGenre`` from JSON."""
        subgenre = SubGenre(subgenre_id=json_obj['id'],
                            subgenre_name=json_obj['name'])
        subgenre.json = json_obj
        _assign_links(subgenre, json_obj)
        return subgenre

    def __str__(self):
        return 'Unknown' if self.name is None else self.name
| {
"content_hash": "db0d08e26259ef75ade37f54af431983",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 79,
"avg_line_length": 31.38051470588235,
"alnum_prop": 0.5160799015874875,
"repo_name": "arcward/ticketpy",
"id": "eb35a0ee513efaebf41d330b07090993230257e5",
"size": "17071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ticketpy/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52308"
}
],
"symlink_target": ""
} |
"""Script to extract the fst between two populations.
Usage: python3 fst_to_genomic_score.py 1 2 < popoolation2.fst > pop1_2.fst
Output is chr, pos, fst
"""
import sys
from numpy import sqrt
def triangular_number(number):
    '''Finds if a number is triangular, and if so, compute the side ``base``
    such that ``number == base * (base - 1) / 2``. It is necessary to know
    the number of populations involved in the analysis of the Fst: given the
    count of pairwise comparisons, this returns the population count.

    Returns -1 when ``number`` is not triangular.

    >>> triangular_number(1)
    2
    >>> triangular_number(3)
    3
    >>> triangular_number(2)
    -1
    '''
    # Positive root of base * (base - 1) / 2 == number.
    base = (1 + sqrt(1 + 8 * number)) / 2
    if base.is_integer():
        return int(base)
    return -1
def populations_to_index(pop1, pop2, npop):
    """Get the index of the column of a pair of populations in the fst file from
    popoolation2.

    Columns in the fst file are
        chromosome, center of the window, number of snps, covered fraction,
        coverage, fst 1:2, fst 1:3, ..., fst 1:n, fst 2:3, ..., fst 2:n, ...,
        fst n-1:n

    Got from here: https://stackoverflow.com/a/27088560/4730336

    >>> populations_to_index(1, 2, 2)
    1
    >>> populations_to_index(1, 2, 22)
    1
    >>> populations_to_index(2, 3, 22)
    22
    >>> populations_to_index(21, 22, 22)
    231
    """
    # Work with zero-based population numbers.
    first = pop1 - 1
    second = pop2 - 1
    # Pairs before `first`'s row, counted via triangle numbers.
    total_pairs = npop * (npop - 1) // 2
    remaining_pairs = (npop - first) * (npop - first - 1) // 2
    return total_pairs - remaining_pairs + (second - first)
if __name__ == '__main__':
    # Usage: python3 fst_to_genomic_score.py POP1 POP2 < popoolation2.fst
    POPULATION_1 = int(sys.argv[1])
    POPULATION_2 = int(sys.argv[2])
    if POPULATION_1 == POPULATION_2:
        raise SystemExit("Populations must be different")
    # Swap so POPULATION_1 < POPULATION_2 (fst columns are ordered i < j).
    if POPULATION_1 >= POPULATION_2:
        POPULATION_1, POPULATION_2 = POPULATION_2, POPULATION_1
    NPOP = None
    for line in sys.stdin:
        line = line.strip().split("\t")
        # The first 5 columns are metadata; the rest are pairwise fst values.
        n_analysis = len(line) - 5
        if not NPOP:
            # Infer the number of populations from the pairwise column count.
            NPOP = int(triangular_number(n_analysis))
        chromosome, position, _, _, _, *analysis = line
        if POPULATION_2 >= NPOP + 1:
            raise SystemExit("Error with analysis to be extracted, exiting")
        index = populations_to_index(POPULATION_1, POPULATION_2, NPOP) - 1
        fst = analysis[index]
        # Each pairwise column has the form "i:j=value"; keep the value.
        fst = fst.split("=")[1]
        sys.stdout.write(f"{chromosome}\t{position}\t{fst}\n")
| {
"content_hash": "347233d7f1cced5f47ef5ebdd684e73e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 80,
"avg_line_length": 28.44047619047619,
"alnum_prop": 0.5985768103809125,
"repo_name": "jlanga/smsk_popoolation",
"id": "253d39da8b0be7312e647c412aef5511377f958b",
"size": "2413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fst_to_genomic_score.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "15487"
},
{
"name": "Perl",
"bytes": "607182"
},
{
"name": "Python",
"bytes": "39328"
},
{
"name": "R",
"bytes": "3647"
},
{
"name": "Shell",
"bytes": "2635"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from django.conf.urls import include
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.urls import path
from blog.sitemap import HomePageSiteMap
from blog.sitemap import PostSiteMap
from blog.sitemap import StaticSiteMap
app_name = "core"

# Sitemap sections served at /sitemap.xml.
sitemaps = {
    "posts": PostSiteMap,
    "static": StaticSiteMap,
    "home": HomePageSiteMap,
}

urlpatterns = [
    path("admin/", admin.site.urls),
    path("", include("blog.urls", namespace="blog")),
    path(
        "sitemap.xml",
        sitemap,
        {"sitemaps": sitemaps},
        # Bug fix: the conventional reverse name was misspelled
        # "django.contrib.sitemaps.views.sitsitemap".
        name="django.contrib.sitemaps.views.sitemap",
    ),
]
| {
"content_hash": "11ae7a1478405e2e0a8c96b69f0a880a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 56,
"avg_line_length": 21.870967741935484,
"alnum_prop": 0.6932153392330384,
"repo_name": "andreztz/DjangoBlog",
"id": "20742670dc905f01825a9e0f6040bcc2fb83785a",
"size": "678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4916"
},
{
"name": "HTML",
"bytes": "15705"
},
{
"name": "JavaScript",
"bytes": "272"
},
{
"name": "PowerShell",
"bytes": "468"
},
{
"name": "Python",
"bytes": "19252"
}
],
"symlink_target": ""
} |
import numpy as np
import logging
from maskgen.cv2api import cv2api_delegate
import maskgen
from maskgen import video_tools
def createOutput(in_file, out_file):
    """Open ``in_file`` for reading and create a writer for ``out_file``
    that matches the input's fps and frame size.

    :param in_file: path of the source video
    :param out_file: path of the video to create
    :return: (writer, capture) tuple
    :raises ValueError: if the output writer cannot be opened
    """
    cap = cv2api_delegate.videoCapture(in_file)
    # Removed the redundant self-assignment `out_file = out_file`.
    fourcc = 0  # 0 selects a raw/uncompressed stream
    fps = cap.get(cv2api_delegate.prop_fps)
    height = int(np.rint(cap.get(cv2api_delegate.prop_frame_height)))
    width = int(np.rint(cap.get(cv2api_delegate.prop_frame_width)))
    out_video = cv2api_delegate.videoWriter(out_file, fourcc, fps, (width,height))
    if not out_video.isOpened():
        err = ("Error opening video" + in_file + " fourcc: " + str(fourcc) +" FPS: "+ str(fps)+
            " H: "+str(height)+" W: "+ str(width) )
        raise ValueError(err)
    return out_video, cap
def dropDupFrames(in_file,out_file, thresh):
    """Copy in_file to out_file, skipping frames too similar to the
    previously read frame.

    Similarity metric: rounded standard deviation of the difference of the
    channel-mean images; a frame is kept only when that value exceeds
    ``thresh``.

    :param in_file: source video path
    :param out_file: destination video path
    :param thresh: integer-like threshold; lower values drop more frames
    :raises ValueError: if no frame can be read from in_file
    """
    logger = logging.getLogger('maskgen')
    debug = logger.isEnabledFor(logging.DEBUG)
    out, cap =createOutput(in_file,out_file)
    more_frames, frame = cap.read()
    if not(more_frames):
        raise ValueError("Error Reading Frames From {}".format(in_file))
    # Collapse channels to a single mean plane for the comparison.
    past=np.mean(frame, axis=2)
    out.write(frame)  # the first frame is always kept
    more_frames, frame = cap.read()
    if debug:
        # i: frames inspected, j: frames dropped (debug bookkeeping only).
        i=0
        j=0
    while (more_frames):
        if debug:
            i+=1
        future = np.mean(frame, axis=2)
        a=int(round(np.std((past - future))))
        if a>int(thresh):
            out.write(frame)
            if debug:
                logger.debug("Keeping Frame {} with difference of {}".format(i, a))
        elif debug:
            j+=1
            logger.debug('dropping frame {} with difference of {}'.format( i, a))
        # Each frame is compared to the previous *read* frame, even if that
        # one was dropped.
        past=future
        more_frames, frame = cap.read()
    if debug:
        logger.debug('Dropped a total of {} Frames'.format(j))
    cap.release()
    out.release()
def transform(img,source,target, **kwargs):
    """Plugin entry point: write ``target`` as ``source`` with duplicate
    frames removed.

    :param img: unused image argument required by the plugin API
    :param source: input video path
    :param target: output video path
    :param kwargs: may contain 'Threshold' (difference threshold, default 3)
    :return: (per-operation arguments, error); error is always None
    """
    # kwargs.get replaces the manual "'Threshold' in kwargs" check.
    dropDupFrames(source, target, kwargs.get('Threshold', 3))
    return {'Start Time':1},None
def operation():
    """Return the operation descriptor consumed by the maskgen framework."""
    threshold_arg = {
        'type': 'int[0:100]',
        'defaultvalue': 3,
        'description': 'Threshold to determine how alike the frames have to be lower threshold more alike'
    }
    return {
        'name': 'DuplicateFrameDrop',
        'category': 'PostProcessing',
        'description': 'Remove any duplicate frames from a video with a certain threshold',
        'software': 'maskgen',
        'version': maskgen.__version__[0:3],
        'arguments': {'Threshold': threshold_arg},
        'transitions': ['video.video']
    }
def suffix():
    """File extension used for this plugin's output videos."""
    return '.avi'
| {
"content_hash": "62fc6118988bd4dedac60793be9da650",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 117,
"avg_line_length": 34.298701298701296,
"alnum_prop": 0.5804619462324877,
"repo_name": "rwgdrummer/maskgen",
"id": "b1bab4fc93382177e7eac132e445b21cf821247b",
"size": "2641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/FrameDuplicateDrop/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "544"
},
{
"name": "Dockerfile",
"bytes": "4825"
},
{
"name": "NSIS",
"bytes": "4907"
},
{
"name": "Python",
"bytes": "2768871"
},
{
"name": "Shell",
"bytes": "8086"
}
],
"symlink_target": ""
} |
import rospy
import os
import sys
from sensor_msgs.msg import Image, CameraInfo
import numpy
import cv2
from cv_bridge import CvBridge
import pickle # gonna need a pickle
class ImageHandler():
    """Subscribes to an image topic and keeps the most recent frame."""
    def __init__(self, image_topic):
        # Becomes True once the first frame has been received.
        self.have_image = False
        self.bridge = CvBridge()
        self.sub = rospy.Subscriber(image_topic, Image, self.handle_image)
    def handle_image(self, imgmsg):
        """Subscriber callback: store the message as an OpenCV image."""
        self.image = self.bridge.imgmsg_to_cv2(imgmsg)
        # Idempotent: setting the flag unconditionally is equivalent to the
        # original guarded assignment.
        self.have_image = True
class FaceDetector():
    """ Measures face location, compiles important info. """
    def __init__(self):
        # Haar cascade shipped alongside the ROS OpenCV install.
        self.classifier = cv2.CascadeClassifier(os.environ['ROS_ROOT'] + '/../OpenCV/haarcascades/haarcascade_frontalface_alt.xml')
        # Collected samples: [depth, [center_x, center_y, w, h], annotated image]
        self.data = []
    def detect_face(self, image):
        """Detect the first face in `image`, ask the operator for its depth,
        and append (depth, face box, annotated image) to self.data.

        :return: True on success, False when no face was found.
        """
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = self.classifier.detectMultiScale(gray, 1.3, 5)
        if len(faces) > 0:
            # Only the first detection is used.
            (x,y,w,h) = faces[0]
            # Convert the box to (center_x, center_y, width, height).
            face = [x + w/2, y + h/2, w, h]
            cv2.rectangle(image, (x,y), (x+w,y+h), (0,255,0), 1) # plot detected face
            # NOTE(review): Python 2 input() evaluates the typed expression;
            # float(raw_input(...)) would be safer — confirm intent.
            depth = input('What depth is this? Report in meters: ')
            self.data.append([depth, face, image])
        else:
            rospy.loginfo('Detection failed! Try again.')
            return False
        cv2.imshow('Image with Face(s)', image)
        cv2.waitKey(0)
        return True
if __name__ == "__main__":
    rospy.init_node('relate_size_to_depth')
    # Handle arguments
    if len(sys.argv) < 2:
        rospy.logwarn('Defaulting OUTPUT file path to "default.pickle"')
        path_out = 'default.pickle'
    else:
        path_out = sys.argv[1]
    image_handler = ImageHandler('/camera/rgb/image_color')
    face_detector = FaceDetector()
    while not image_handler.have_image: # wait for first image
        rospy.sleep(0.01)
    # NOTE(review): Python 2 input() evaluates the text; an integer is expected.
    N = input('Input number of measurements: ')
    for i in range(N):
        success = False
        while not success:
            feedback = raw_input('Press ENTER when subject gives thumbs-up, or type SKIP to move on. ')
            if feedback == 'SKIP':
                rospy.loginfo('Skipping measurement!')
                break
            else:
                success = face_detector.detect_face(image_handler.image)
                if success:
                    rospy.loginfo('Success! Moving to next measurement.')
    # Persist all collected (depth, face box, image) samples.
    with open(path_out, 'w') as f:
        pickle.dump(face_detector.data, f)
    rospy.loginfo('Pickling successful! All done.')
    cv2.destroyAllWindows()
| {
"content_hash": "dde49adf1fb3acf9055839de68b2de1b",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 131,
"avg_line_length": 31.547619047619047,
"alnum_prop": 0.5916981132075472,
"repo_name": "OSUrobotics/privacy-interfaces",
"id": "fbbcd213a666ccc5aae2a1c11cb7c3362fc716dd",
"size": "2863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filtering/probability_filters/scripts/expanding_face_filter/relate_size_to_depth.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "83936"
},
{
"name": "C++",
"bytes": "1360235"
},
{
"name": "CMake",
"bytes": "46381"
},
{
"name": "Matlab",
"bytes": "2021"
},
{
"name": "Objective-C",
"bytes": "316"
},
{
"name": "Python",
"bytes": "364838"
}
],
"symlink_target": ""
} |
import datetime
import os
import re
import hashlib, json
from flask import Flask, jsonify, send_from_directory
from flask.ext.compress import Compress
from geoalchemy2.shape import from_shape
from shapely.geometry import box, MultiPolygon
from prop_xfer.models import db, Transfer
# Flask application setup; database URL comes from the environment.
app = Flask("prop_xfer", static_url_path='')
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
db.init_app(app)
Compress(app)  # gzip responses
# Set NO_CACHE=1 in the environment to bypass the on-disk response cache.
NO_CACHE = bool(int(os.environ.get("NO_CACHE", "0")))
# print bool(os.environ.get("NO_CACHE", "0"))
cache_folder = os.path.dirname(os.path.realpath(__file__)) + '/cache'
#Creates cache directory
if not os.path.exists(cache_folder):
    print 'Creating cache directory...'
    os.makedirs(cache_folder)
def clear_cache():
    """Delete every cached response file in the cache folder.

    Subdirectories are left untouched; individual deletion failures are
    printed and skipped (best effort).
    """
    folder = cache_folder
    for the_file in os.listdir(folder):
        file_path = os.path.join(folder, the_file)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
        except Exception as e:  # `as` syntax works on Python 2.6+ and 3
            print(e)
@app.route('/')
def home():
    # Serve the single-page front end from the static folder.
    return app.send_static_file("index.html")
@app.route('/stats', defaults={'bounds': None})
@app.route('/stats/', defaults={'bounds': None})
@app.route('/stats/<bounds>')
def stats(bounds):
cache_location = cache_folder + '/' + hashlib.md5('stats/' + str(bounds)).hexdigest() + '.json.cache'
if os.path.isfile(cache_location) and not NO_CACHE:
with open(cache_location) as r:
data = jsonify(json.loads(r.read()))
return data
else:
if bounds:
try:
m = re.match(r'((-?\d+(?:\.\d+)?),){3}(-?\d+(\.\d+)?)$', bounds)
if not m:
raise ValueError("Bounds should be longitudes/latitudes in west,south,east,north order")
w,s,e,n = map(float, bounds.split(','))
if w < -180 or w > 180 or e < -180 or e > 180:
raise ValueError("Bounds should be longitudes/latitudes in west,south,east,north order")
elif s < -90 or s > 90 or n < -90 or n > 90 or s > n:
raise ValueError("Bounds should be longitudes/latitudes in west,south,east,north order")
if e < w:
bounds = MultiPolygon([box(w, s, 180, n), box(-180, s, e, n)])
else:
bounds = MultiPolygon([box(w, s, e, n)])
except ValueError as e:
r = jsonify({"error": str(e)})
r.status_code = 400
return r
dates = {}
query = Transfer.query.distinct(Transfer.week_start)
for date in query:
q = Transfer.query.filter_by(week_start=date.return_date())
if bounds:
dates.update({str(date.return_date()) : q.filter(Transfer.location.ST_Intersects(from_shape(bounds, 4326))).count()})
else:
dates.update({str(date.return_date()) : q.count()})
if not NO_CACHE:
print "Cahing json: ", cache_location
with open(cache_location, 'w') as w:
w.write(json.dumps(dates))
return jsonify(dates)
@app.route('/week/<date>', defaults={'bounds': None})
@app.route('/week/<date>/', defaults={'bounds': None})
@app.route('/week/<date>/<bounds>')
def week_data(date, bounds):
    """
    Query Transfer data for a week, optionally spatially filtered.

    :param date: week start in ISO YYYY-MM-DD format
    :param bounds: optional "west,south,east,north" longitudes/latitudes

    Returns a GeoJSON FeatureCollection (capped at 2000 features).
    """
    #Creates md5 link to cache file
    cache_location = cache_folder + '/' + hashlib.md5('week/' + str(date) + '/' + str(bounds)).hexdigest() + '.json.cache'
    if os.path.isfile(cache_location) and not NO_CACHE:
        with open(cache_location) as r:
            data = jsonify(json.loads(r.read()))
        return data
    else:
        try:
            # week should be in ISO YYYY-MM-DD format
            week_start = datetime.datetime.strptime(date, '%Y-%m-%d').date()
        except ValueError as e:
            r = jsonify({"error": str(e)})
            r.status_code = 400
            return r
        if bounds:
            # Optionally, filter the results spatially
            # west,south,east,north in degrees (latitude/longitude)
            try:
                m = re.match(r'((-?\d+(?:\.\d+)?),){3}(-?\d+(\.\d+)?)$', bounds)
                if not m:
                    raise ValueError("Bounds should be longitudes/latitudes in west,south,east,north order")
                w,s,e,n = map(float, bounds.split(','))
                if w < -180 or w > 180 or e < -180 or e > 180:
                    raise ValueError("Bounds should be longitudes/latitudes in west,south,east,north order")
                elif s < -90 or s > 90 or n < -90 or n > 90 or s > n:
                    raise ValueError("Bounds should be longitudes/latitudes in west,south,east,north order")
                if e < w:
                    # Box crosses the antimeridian: split into two boxes.
                    bounds = MultiPolygon([box(w, s, 180, n), box(-180, s, e, n)])
                else:
                    bounds = MultiPolygon([box(w, s, e, n)])
            except ValueError as e:
                r = jsonify({"error": str(e)})
                r.status_code = 400
                return r
        # Filter the transfers - the DB query happens here
        query = Transfer.query.filter_by(week_start=week_start)
        if bounds:
            query = query.filter(Transfer.location.ST_Intersects(from_shape(bounds, 4326)))
        # Hard cap on the response size.
        query = query.limit(2000)
        features = []
        for transfer in query:
            features.append(transfer.as_geojson())
        # Caching data
        if not NO_CACHE:
            print "Cahing json: ", cache_location
            with open(cache_location, 'w') as w:
                w.write(json.dumps({"type": "FeatureCollection","features": features}))
        # Format the response as a GeoJSON FeatureCollection
        return jsonify({
            "type": "FeatureCollection",
            "features": features
        })
@app.route('/static/<path:path>')
def static_proxy(path):
    # Explicit passthrough for static assets.
    return app.send_static_file(path)
if __name__ == '__main__':
    # Development server only; binds all interfaces.
    app.run(debug=True, host='0.0.0.0')
| {
"content_hash": "43d26fa36d5527155c310066653c9b22",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 133,
"avg_line_length": 38.006211180124225,
"alnum_prop": 0.5587514299722177,
"repo_name": "rcoup/sot14-property-backend",
"id": "a15fe92485f7da045cb429d99640e2e5d440b390",
"size": "6119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prop_xfer/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "20265"
},
{
"name": "JavaScript",
"bytes": "113726"
},
{
"name": "Python",
"bytes": "13770"
},
{
"name": "Shell",
"bytes": "1645"
}
],
"symlink_target": ""
} |
import importlib.metadata
import json
import platform
import sys
import click
import pytest
from click.testing import CliRunner
import dask
import dask.cli
def test_version():
    """`dask --version` exits cleanly and reports the package version."""
    runner = CliRunner()
    outcome = runner.invoke(dask.cli.cli, ["--version"])
    assert outcome.exit_code == 0
    expected = f"cli, version {dask.__version__}\n"
    assert outcome.output == expected
def test_info_versions():
    """`dask info versions` prints a JSON table of relevant versions."""
    outcome = CliRunner().invoke(dask.cli.versions)
    assert outcome.exit_code == 0

    # stdout is a JSON object, so it can be parsed back into a dict.
    table = json.loads(outcome.output)

    expected_python = ".".join(str(part) for part in sys.version_info[:3])
    assert table["Python"] == expected_python
    assert table["dask"] == dask.__version__
    assert table["Platform"] == platform.uname().system

    try:
        from distributed import __version__ as distributed_version
    except ImportError:
        distributed_version = None
    assert table["distributed"] == distributed_version
@click.group()
def dummy_cli():
    # Registration target used by test_register_command_ep below.
    pass
def bad_command():
    # Deliberately NOT a click command; registering it must warn.
    pass
@click.command(name="good")
def good_command():
    # Valid click command fixture.
    pass
@click.command(name="good")
def good_command_2():
    # Shares the name "good" to provoke the duplicate-name warning.
    pass
def test_register_command_ep():
    """Non-click callables are rejected with a warning; click commands register."""
    from dask.cli import _register_command_ep

    def _entry_point(name, value):
        return importlib.metadata.EntryPoint(name=name, value=value, group="dask_cli")

    bad_ep = _entry_point("bad", "dask.tests.test_cli:bad_command")
    good_ep = _entry_point("good", "dask.tests.test_cli:good_command")

    with pytest.warns(UserWarning, match="must be instances of"):
        _register_command_ep(dummy_cli, bad_ep)

    _register_command_ep(dummy_cli, good_ep)
    assert "good" in dummy_cli.commands
    assert dummy_cli.commands["good"] is good_command
@click.group()
def dummy_cli_2():
    # Separate group so the duplicate-name test does not touch dummy_cli's state.
    pass
def test_repeated_name_registration_warn():
    """Registering two commands under the same CLI name emits a warning."""
    from dask.cli import _register_command_ep

    def _entry_point(name, value):
        return importlib.metadata.EntryPoint(name=name, value=value, group="dask_cli")

    first = _entry_point("one", "dask.tests.test_cli:good_command")
    second = _entry_point("two", "dask.tests.test_cli:good_command_2")

    _register_command_ep(dummy_cli_2, first)
    with pytest.warns(UserWarning, match="While registering the command with name"):
        _register_command_ep(dummy_cli_2, second)
| {
"content_hash": "6291ca5276473298ffeb102fff348c33",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 84,
"avg_line_length": 22.88785046728972,
"alnum_prop": 0.6512862392813393,
"repo_name": "dask/dask",
"id": "2adab2b77dac72a96897f33ef277289498945be5",
"size": "2449",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "dask/tests/test_cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jinja",
"bytes": "6086"
},
{
"name": "Python",
"bytes": "4591450"
},
{
"name": "Shell",
"bytes": "5098"
}
],
"symlink_target": ""
} |
import random
import os
import re
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn import linear_model
from sklearn import tree
from sklearn import svm
# PUT POLARITY DATASET PATH HERE
POLARITY_PATH = '/Users/marcotcr/phd/datasets/multi_domain_polarity/'
def LoadDataset(dataset_name):
    """Load a train/test text-classification dataset by name.

    :param dataset_name: '2ng', 'talkng' or '3ng' for 20-newsgroups subsets,
        or 'multi_polarity_<domain>' for the multi-domain sentiment data.
    :return: (train_data, train_labels, test_data, test_labels, class_names)
    """
    if dataset_name.endswith('ng'):
        # Bug fix: fetch_20newsgroups was used without ever being imported.
        from sklearn.datasets import fetch_20newsgroups
        if dataset_name == '2ng':
            cats = ['alt.atheism', 'soc.religion.christian']
            class_names = ['Atheism', 'Christianity']
        if dataset_name == 'talkng':
            cats = ['talk.politics.guns', 'talk.politics.misc']
            class_names = ['Guns', 'PoliticalMisc']
        if dataset_name == '3ng':
            cats = ['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.windows.x']
            class_names = ['windows.misc', 'ibm.hardware', 'windows.x']
        newsgroups_train = fetch_20newsgroups(subset='train', categories=cats)
        newsgroups_test = fetch_20newsgroups(subset='test', categories=cats)
        train_data = newsgroups_train.data
        train_labels = newsgroups_train.target
        test_data = newsgroups_test.data
        test_labels = newsgroups_test.target
        return train_data, train_labels, test_data, test_labels, class_names
    if dataset_name.startswith('multi_polarity_'):
        name = dataset_name.split('_')[2]
        return LoadMultiDomainDataset(POLARITY_PATH + name)
def LoadMultiDomainDataset(path_data, remove_bigrams=True):
    """Load one domain of the multi-domain polarity data as an 80/20 split.

    :param path_data: folder containing 'positive.review' and 'negative.review'
        files whose lines hold "token:count" pairs
    :param remove_bigrams: if True, drop tokens containing '_' (bigrams)
    :return: (train_data, train_labels, test_data, test_labels, class_names)
        where labels are 1 for positive, 0 for negative
    """
    random.seed(1)  # deterministic shuffle/split across runs
    pos = []
    neg = []
    def get_words(line, remove_bigrams=True):
        # Tokens look like "word:count"; expand each to `count` repetitions.
        # Raw string for the regex (was a plain string with '\w'/'\d').
        z = [tuple(x.split(':')) for x in re.findall(r'\w*?:\d', line)]
        if remove_bigrams:
            z = ' '.join([' '.join([x[0]] * int(x[1])) for x in z if '_' not in x[0]])
        else:
            z = ' '.join([' '.join([x[0]] * int(x[1])) for x in z])
        return z
    for line in open(os.path.join(path_data, 'negative.review')):
        neg.append(get_words(line, remove_bigrams))
    for line in open(os.path.join(path_data, 'positive.review')):
        pos.append(get_words(line, remove_bigrams))
    random.shuffle(pos)
    random.shuffle(neg)
    # 80% train / 20% test, per class.
    split_pos = int(len(pos) * .8)
    split_neg = int(len(neg) * .8)
    train_data = pos[:split_pos] + neg[:split_neg]
    test_data = pos[split_pos:] + neg[split_neg:]
    train_labels = [1] * len(pos[:split_pos]) + [0] * len(neg[:split_neg])
    test_labels = [1] * len(pos[split_pos:]) + [0] * len(neg[split_neg:])
    return train_data, np.array(train_labels), test_data, np.array(test_labels), ['neg', 'pos']
| {
"content_hash": "37da3e6929524e087ed7092aa95378c0",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 93,
"avg_line_length": 43.763636363636365,
"alnum_prop": 0.6622351474864977,
"repo_name": "marcotcr/lime-experiments",
"id": "8a61455c998a322ec885384a1e9fa6fd914aa377",
"size": "2407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "load_datasets.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "48469"
}
],
"symlink_target": ""
} |
'''Immutable types
'''
__all__ = ['frozendict']
class frozendict(dict):
  '''Immutable dictionary.

  Behaves like ``dict`` but raises :exc:`TypeError` from every mutating
  method, and is hashable so it can be used as a dict key or set member.
  '''
  def __setitem__(self, *args, **kwargs):
    raise TypeError("frozendict object does not support item assignment")

  # All mutating methods share the same failing implementation.
  setdefault = __delitem__ = clear = pop = popitem = __setitem__

  def update(self, *args):
    '''Update a mutable copy with key/value pairs from b, replacing existing keys.

    :returns: A mutable copy with updated pairs.
    :rtype: dict
    '''
    d = self.copy()
    d.update(*args)
    return d

  # Inherited dict.copy: returns a plain (mutable) dict.
  copy = dict.copy

  def __hash__(self):
    # XOR of item hashes is order-independent. Starting from 0 fixes the
    # old crash on an empty frozendict (it indexed items[0]) and keeps the
    # same hash values for non-empty instances; iterating (instead of
    # subscripting items()) also works on Python 3.
    res = 0
    for item in self.items():
      res ^= hash(item)
    return res
| {
"content_hash": "a5ac1e4965a75442e8c60f9758beb32b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 82,
"avg_line_length": 21.676470588235293,
"alnum_prop": 0.5956580732700135,
"repo_name": "rsms/smisk",
"id": "137e48a78ea7322eab32a16835f1f9d622a096d8",
"size": "755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/smisk/util/frozen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "216703"
},
{
"name": "Python",
"bytes": "435347"
},
{
"name": "Shell",
"bytes": "10500"
}
],
"symlink_target": ""
} |
import re
import json
class WordsCounter(object):
    """ Counts used words in phrases and keeps aggregate usage statistics.
    """

    @classmethod
    def load_from_json(cls, file_name):
        """Method loads words usage statistics from a json file.

        :param file_name: name of a file for loading words usage statistics
        :return: instance of the WordsCounter class
        """
        with open(file_name, 'r') as file:
            return cls(json.load(file))

    def __init__(self, words_map=None):
        """
        :param words_map: dictionary with words usage statistics where key is
            a (lower-case) word and value is the word usage count. Also you
            can pass None to start empty.
        """
        # Matches runs of non-word characters (underscore included).
        self.__non_word_characters_pattern = re.compile(r'[\W_]+', re.UNICODE)
        if words_map:
            self.__words_map = words_map
        else:
            self.__words_map = {}

    def __count_word(self, word):
        # Bug fix: read and write the SAME lower-cased key. Previously the
        # count was read with the original casing but stored lower-cased,
        # so e.g. "Hello Hello" counted as 1 instead of 2.
        key = word.lower()
        self.__words_map[key] = self.__words_map.get(key, 0) + 1

    def __remove_non_word_characters(self, phrase):
        return self.__non_word_characters_pattern.sub(' ', phrase)

    def count_words(self, phrase):
        """Method counts used words in the phrase and put it to the usage statistics.

        Words are compared case-insensitively and split by non-word characters.

        :param phrase: phrase to analyze
        """
        phrase = self.__remove_non_word_characters(phrase)
        for word in phrase.split():
            if word:
                self.__count_word(word)

    def get_top_words(self, words_count=10):
        """Get top used words from the usage statistics.

        :param words_count: int value how many words should be returned. By default uses 10.
        :return: list of (word, count) tuples, most used first; ties are
            ordered alphabetically.
        """
        # Sort alphabetically first so equal counts tie-break A->Z
        # (Python sorts are stable), then by descending usage count.
        top_words = sorted(self.__words_map, key=lambda k: k, reverse=False)
        top_words = sorted(top_words, key=self.__words_map.get, reverse=True)[:words_count]
        return [(word, self.__words_map[word]) for word in top_words]

    def save_to_json(self, file_name):
        """Save words usage statistics to a json file.

        :param file_name: file name where the statistics should be saved
        """
        with open(file_name, 'w') as file:
            json.dump(self.__words_map, file, ensure_ascii=False)

    def __len__(self):
        # Number of distinct words counted so far.
        return len(self.__words_map)

    def top_words_to_string(self, words_count=10):
        """ Method returns top used words as a string from the usage statistics.

        The result string has the format
            [ word1 : word1UsageCount ][ word2 : word2UsageCount ]...
        sorted by usage count, ties alphabetical. If nothing has been
        counted yet, returns "words haven't been counted yet".

        :param words_count: how many words should be included in the result string
        :return: string - all details please see above
        """
        string = ""
        if len(self) == 0:
            string += "words haven't been counted yet"
        else:
            for (word, count) in self.get_top_words(words_count):
                string += ("[ %s : %d ]" % (word, count))
        return string
| {
"content_hash": "44cd5344efc5a863b904ab976f24b2c3",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 97,
"avg_line_length": 38.37078651685393,
"alnum_prop": 0.6128843338213763,
"repo_name": "sergeymironov0001/twitch-chat-bot",
"id": "1e0e6df570e567efd4debe52076472a4bc890bf7",
"size": "3415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wordscounterbot/words_counter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21082"
}
],
"symlink_target": ""
} |
# Auto-generated dump fixture for table contacts_role (no rows in this dump).
# `logger` and `loader` are injected by the surrounding restore script.
logger.info("Loading 0 objects to table contacts_role...")
# fields: id, type, person, company
loader.flush_deferred_objects()
| {
"content_hash": "0a9261c650407f988ff3496ace9cd414",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 58,
"avg_line_length": 32,
"alnum_prop": 0.7421875,
"repo_name": "lino-framework/book",
"id": "30e9c3a16d40cbd8b99f0d3d1da3428254b43780",
"size": "152",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino_book/projects/lydia/tests/dumps/18.12.0/contacts_role.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "JavaScript",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "991438"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
} |
from utils import crawl_folder
from utils import TCustomCounter
import chardet
LIB_SECTION_FIELD = "lib_section"
DEFAULT_ENCODING = "windows-1251"
class TIndexingObjectField(object):
    """One field of an object to be indexed; the value may live in a file."""
    def __init__(self, field_id, field_value, field_file_path):
        self.field_id = field_id
        self.field_value = field_value
        self.field_file_path = field_file_path
class TIndexingObjectData(object):
    """An object to index: its id plus the list of its fields."""
    def __init__(self, object_id, object_fields):
        self.object_id = object_id
        self.object_fields = object_fields
class TCrawler(object):
    """Walks a folder tree (or a CSV export) and yields objects to index."""
    def __init__(self, verbosity = 0):
        # verbosity is forwarded to the progress counters below.
        self.verbosity = verbosity
    def crawl_object_fields(self, folder, object_id):
        """Each sub-entry of `folder` becomes a field whose value lives in a file."""
        object_fields = crawl_folder(folder)
        to_update = []
        for field_path, field_id in object_fields:
            to_update.append( TIndexingObjectField(field_id=field_id,
                                                   field_value="",
                                                   field_file_path=field_path ) )
        return to_update
    def crawl_folder(self, folder):
        """Yield one TIndexingObjectData per object folder found under `folder`."""
        object_folders = crawl_folder(folder)
        import sys
        processed_counter = TCustomCounter("Crawler, found objects", sys.stderr, self.verbosity, 100)
        for object_folder, object_id in object_folders:
            fields2update = self.crawl_object_fields(object_folder, object_id)
            object2update = TIndexingObjectData(object_id=object_id,
                                                object_fields=fields2update)
            yield object2update
            processed_counter.add()
    def crawl_csv(self, csv_file_path):
        """Yield one TIndexingObjectData per row of a semicolon-separated CSV.

        Column 0 is the object id; columns listed in field_index2name become
        plain fields; columns 3-5 form a section hierarchy that is hashed
        into numeric codes stored under LIB_SECTION_FIELD.
        """
        field_index2name = {1:"year",
                            2:"udc",
                            #3:"class_level1",
                            #4:"class_level2",
                            #5:"class_level3",
                            6:"pages_count",
                            7: "author",
                            8:"title" }
        hierarchy_indices = [3, 4, 5]
        import sys
        processed_counter = TCustomCounter("Crawler, found objects", sys.stderr, self.verbosity, 1000)
        # Sniff the file encoding once from the whole file content.
        encoding = chardet.detect(open(csv_file_path).read())['encoding']
        all_hierarchy_codes = {}
        for line in open(csv_file_path):
            line = line.decode(encoding)
            field_values = line.strip().split(";")
            object_id = field_values[0]
            fields = []
            for field_index, field_id in field_index2name.items():
                if len(field_values) > field_index:
                    # Values are re-encoded to the index's default encoding.
                    field_value_encoded = field_values[field_index].encode(DEFAULT_ENCODING)
                    fields.append(TIndexingObjectField(field_id,
                                                       field_value=field_value_encoded,
                                                       field_file_path=""))
            """ library section feature """
            hierarchy_codes = []
            import hashlib
            hash = hashlib.md5()
            path = ""
            for hierarchy_feat_index in hierarchy_indices:
                node_name = field_values[hierarchy_feat_index].strip()
                if not node_name:
                    break
                # The hash is cumulative: a level's code depends on all ancestors.
                hash.update(node_name.encode("utf8"))
                code = int(hash.hexdigest(), 16) % 1000000007
                path += node_name + ";"
                hierarchy_codes.append(code)
                # Abort hard on an (unlikely) hash collision between paths.
                if not code in all_hierarchy_codes:
                    all_hierarchy_codes[code] = path
                elif code in all_hierarchy_codes and all_hierarchy_codes[code] != path:
                    print "Hash collision:", path.encode("utf8"), "vs.", all_hierarchy_codes[code].encode("utf8")
                    print "FULL STOP"
                    exit()
            fields.append(TIndexingObjectField(field_id=LIB_SECTION_FIELD,
                                               field_value=hierarchy_codes,
                                               field_file_path=""))
            object2update = TIndexingObjectData(object_id=object_id,
                                                object_fields=fields)
            yield object2update
            processed_counter.add()
        """
        all_hierarchy_codes = [(path, code) for code, path in all_hierarchy_codes.items()]
        all_hierarchy_codes.sort()
        print "~~~~~ Library sections codes ~~~~~"
        for path, code in all_hierarchy_codes:
            print path.encode("utf8") + "\t" + str(code)
        """
| {
"content_hash": "2d82f2ab7d300a531edb5a699bccb1bd",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 113,
"avg_line_length": 44.076190476190476,
"alnum_prop": 0.5108038029386344,
"repo_name": "mavlyutovrus/light_search",
"id": "4bab60871bc0995a1905224f97fd04f37f97c54b",
"size": "4649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "156224"
},
{
"name": "Python",
"bytes": "77007"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
from functools import wraps
from django.core.cache import cache as djcache
from django.core.cache import caches
from django.conf import settings
from django.db.models import Q
from django.core.cache.backends.base import BaseCache
from typing import Any, Callable, Dict, Iterable, List, Optional, Union, TypeVar, Text
from zerver.lib.utils import statsd, statsd_key, make_safe_digest
import subprocess
import time
import base64
import random
import sys
import os
import os.path
import hashlib
import six
# `if False` keeps these imports visible to the type checker while avoiding
# the runtime import cycle with zerver.models.
if False:
    from zerver.models import UserProfile, Realm, Message
    # These modules have to be imported for type annotations but
    # they cannot be imported at runtime due to cyclic dependency.
# Generic type variable for decorators that wrap an arbitrary function.
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
class NotFoundInCache(Exception):
    """Signals a cache miss — presumably raised by lookup helpers later in
    this module (not visible here); TODO confirm against the callers."""
    pass
# Aggregate remote-cache timing statistics, read/updated by the helpers below.
remote_cache_time_start = 0.0
remote_cache_total_time = 0.0
remote_cache_total_requests = 0
def get_remote_cache_time():
    # type: () -> float
    """Total seconds spent in remote cache calls so far."""
    return remote_cache_total_time
def get_remote_cache_requests():
    # type: () -> int
    """Number of remote cache round-trips recorded so far."""
    return remote_cache_total_requests
def remote_cache_stats_start():
    # type: () -> None
    """Mark the start of a remote cache call (paired with ..._finish)."""
    global remote_cache_time_start
    remote_cache_time_start = time.time()
def remote_cache_stats_finish():
    # type: () -> None
    """Record one finished remote cache call: bump the request counter and
    add the time elapsed since remote_cache_stats_start()."""
    global remote_cache_total_time
    global remote_cache_total_requests
    global remote_cache_time_start
    remote_cache_total_requests += 1
    remote_cache_total_time += (time.time() - remote_cache_time_start)
def get_or_create_key_prefix():
    # type: () -> Text
    """Return this deployment's cache key prefix, creating and persisting it if needed.

    The prefix namespaces every cache key, so deployments (and test runs)
    sharing one memcached instance cannot collide.  It is persisted in
    var/remote_cache_prefix so all processes of a deployment agree on it.
    """
    if settings.CASPER_TESTS:
        # This sets the prefix for the benefit of the Casper tests.
        #
        # Having a fixed key is OK since we don't support running
        # multiple copies of the casper tests at the same time anyway.
        return u'casper_tests:'
    elif settings.TEST_SUITE:
        # The Python tests overwrite KEY_PREFIX on each test, but use
        # this codepath as well, just to save running the more complex
        # code below for reading the normal key prefix.
        return u'django_tests_unused:'
    # directory `var` should exist in production
    subprocess.check_call(["mkdir", "-p", os.path.join(settings.DEPLOY_ROOT, "var")])
    filename = os.path.join(settings.DEPLOY_ROOT, "var", "remote_cache_prefix")
    try:
        # O_CREAT | O_EXCL: exactly one process gets to create the file;
        # every other one lands in the except branch and reads what it wrote.
        fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o444)
        random_hash = hashlib.sha256(Text(random.getrandbits(256)).encode('utf-8')).digest()
        prefix = base64.b16encode(random_hash)[:32].decode('utf-8').lower() + ':'
        # This does close the underlying file
        with os.fdopen(fd, 'w') as f:
            f.write(prefix + "\n")
    except OSError:
        # The file already exists
        tries = 1
        while tries < 10:
            with open(filename, 'r') as f:
                prefix = f.readline()[:-1]
            if len(prefix) == 33:
                # 32 hex characters plus the trailing ':' -- fully written.
                break
            tries += 1
            prefix = ''
            # Give the creating process time to finish writing the file.
            time.sleep(0.5)
    if not prefix:
        print("Could not read remote cache key prefix file")
        sys.exit(1)
    return prefix
# Computed once at import time; all cache helpers below implicitly namespace
# their keys under this deployment-wide prefix.
KEY_PREFIX = get_or_create_key_prefix()  # type: Text
def bounce_key_prefix_for_testing(test_name):
    # type: (Text) -> None
    """Give each test (per process) its own private cache key namespace."""
    global KEY_PREFIX
    pid_text = Text(os.getpid())
    KEY_PREFIX = test_name + u':' + pid_text + u':'
def get_cache_backend(cache_name):
    # type: (Optional[str]) -> BaseCache
    """Look up a Django cache backend by name; None selects the default cache."""
    if cache_name is not None:
        return caches[cache_name]
    return djcache
def get_cache_with_key(keyfunc, cache_name=None):
    # type: (Any, Optional[str]) -> Any
    """Decorator that reads the wrapped function's value from the cache only.

    Unlike cache_with_key, the wrapped function is never actually called.
    Because a cached value may legitimately be None, a missing key is
    signalled by raising NotFoundInCache rather than by a sentinel return.
    """
    def decorator(func):
        # type: (Callable[..., Any]) -> (Callable[..., Any])
        @wraps(func)
        def func_with_caching(*args, **kwargs):
            # type: (*Any, **Any) -> Callable[..., Any]
            cached = cache_get(keyfunc(*args, **kwargs), cache_name=cache_name)
            if cached is None:
                raise NotFoundInCache()
            # Values are stored as singleton tuples; unwrap before returning.
            return cached[0]
        return func_with_caching
    return decorator
def cache_with_key(keyfunc, cache_name=None, timeout=None, with_statsd_key=None):
    # type: (Any, Optional[str], Optional[int], Optional[str]) -> Any
    # This function can't be typed perfectly because returning a generic function
    # isn't supported in mypy - https://github.com/python/mypy/issues/1551.
    """Decorator which applies Django caching to a function.

    Decorator argument is a function which computes a cache key
    from the original function's arguments. You are responsible
    for avoiding collisions with other uses of this decorator or
    other uses of caching."""
    def decorator(func):
        # type: (Callable[..., Any]) -> (Callable[..., Any])
        @wraps(func)
        def func_with_caching(*args, **kwargs):
            # type: (*Any, **Any) -> Callable[..., Any]
            key = keyfunc(*args, **kwargs)
            val = cache_get(key, cache_name=cache_name)
            # Report a hit/miss event, tagging database-cache traffic separately.
            extra = ".dbcache" if cache_name == 'database' else ""
            if with_statsd_key is not None:
                metric_key = with_statsd_key
            else:
                metric_key = statsd_key(key)
            status = "miss" if val is None else "hit"
            statsd.incr("cache%s.%s.%s" % (extra, metric_key, status))
            # Values are singleton tuples so that we can distinguish
            # a result of None from a missing key.
            if val is not None:
                return val[0]
            result = func(*args, **kwargs)
            cache_set(key, result, cache_name=cache_name, timeout=timeout)
            return result
        return func_with_caching
    return decorator
def cache_set(key, val, cache_name=None, timeout=None):
    # type: (Text, Any, Optional[str], Optional[int]) -> None
    """Store val under key (wrapped in a singleton tuple) in the given backend."""
    backend = get_cache_backend(cache_name)
    remote_cache_stats_start()
    backend.set(KEY_PREFIX + key, (val,), timeout=timeout)
    remote_cache_stats_finish()
def cache_get(key, cache_name=None):
    # type: (Text, Optional[str]) -> Any
    """Fetch key from the backend; returns the stored singleton tuple, or None."""
    backend = get_cache_backend(cache_name)
    remote_cache_stats_start()
    result = backend.get(KEY_PREFIX + key)
    remote_cache_stats_finish()
    return result
def cache_get_many(keys, cache_name=None):
    # type: (List[Text], Optional[str]) -> Dict[Text, Any]
    """Bulk-fetch keys; the returned dict maps un-prefixed keys to stored values."""
    prefixed = [KEY_PREFIX + key for key in keys]
    remote_cache_stats_start()
    fetched = get_cache_backend(cache_name).get_many(prefixed)
    remote_cache_stats_finish()
    # Strip the deployment prefix back off before handing results to the caller.
    return {key[len(KEY_PREFIX):]: value for key, value in fetched.items()}
def cache_set_many(items, cache_name=None, timeout=None):
    # type: (Dict[Text, Any], Optional[str], Optional[int]) -> None
    """Bulk-store items (with un-prefixed keys) into the given cache backend."""
    prefixed_items = {KEY_PREFIX + key: value for key, value in items.items()}
    remote_cache_stats_start()
    get_cache_backend(cache_name).set_many(prefixed_items, timeout=timeout)
    remote_cache_stats_finish()
def cache_delete(key, cache_name=None):
    # type: (Text, Optional[str]) -> None
    """Remove a single (un-prefixed) key from the given cache backend."""
    backend = get_cache_backend(cache_name)
    remote_cache_stats_start()
    backend.delete(KEY_PREFIX + key)
    remote_cache_stats_finish()
def cache_delete_many(items, cache_name=None):
    # type: (Iterable[Text], Optional[str]) -> None
    """Remove a batch of (un-prefixed) keys from the given cache backend."""
    prefixed = (KEY_PREFIX + item for item in items)
    remote_cache_stats_start()
    get_cache_backend(cache_name).delete_many(prefixed)
    remote_cache_stats_finish()
# Required Arguments are as follows:
# * object_ids: The list of object ids to look up
# * cache_key_function: object_id => cache key
# * query_function: [object_ids] => [objects from database]
# Optional keyword arguments:
# * setter: Function to call before storing items to cache (e.g. compression)
# * extractor: Function to call on items returned from cache
# (e.g. decompression). Should be the inverse of the setter
# function.
# * id_fetcher: Function mapping an object from database => object_id
# (in case we're using a key more complex than obj.id)
# * cache_transformer: Function mapping an object from database =>
# value for cache (in case the values that we're caching are some
# function of the objects, not the objects themselves)
# Type of the ids used to look objects up (database pk or digest-style key).
ObjKT = TypeVar('ObjKT', int, Text)
ItemT = Any  # https://github.com/python/mypy/issues/1721
CompressedItemT = Any  # https://github.com/python/mypy/issues/1721
def generic_bulk_cached_fetch(cache_key_function, # type: Callable[[ObjKT], Text]
                              query_function, # type: Callable[[List[ObjKT]], Iterable[Any]]
                              object_ids, # type: Iterable[ObjKT]
                              extractor=lambda obj: obj, # type: Callable[[CompressedItemT], ItemT]
                              setter=lambda obj: obj, # type: Callable[[ItemT], CompressedItemT]
                              id_fetcher=lambda obj: obj.id, # type: Callable[[Any], ObjKT]
                              cache_transformer=lambda obj: obj # type: Callable[[Any], ItemT]
                              ):
    # type: (...) -> Dict[ObjKT, Any]
    """Look up many objects by id, using the remote cache where possible.

    Returns a dict mapping object_id -> (cache_transformer'd) object for
    every id found either in the cache or via query_function; ids found in
    neither place are simply absent from the result.
    """
    cache_keys = {} # type: Dict[ObjKT, Text]
    for object_id in object_ids:
        cache_keys[object_id] = cache_key_function(object_id)
    cached_objects = cache_get_many([cache_keys[object_id]
                                     for object_id in object_ids])
    for (key, val) in cached_objects.items():
        # Stored values are singleton tuples; unwrap and undo the setter's encoding.
        cached_objects[key] = extractor(cached_objects[key][0])
    needed_ids = [object_id for object_id in object_ids if
                  cache_keys[object_id] not in cached_objects]
    # Fill cache misses from the database and write them back to the cache.
    db_objects = query_function(needed_ids)
    items_for_remote_cache = {} # type: Dict[Text, Any]
    for obj in db_objects:
        key = cache_keys[id_fetcher(obj)]
        item = cache_transformer(obj)
        items_for_remote_cache[key] = (setter(item),)
        cached_objects[key] = item
    if len(items_for_remote_cache) > 0:
        cache_set_many(items_for_remote_cache)
    return dict((object_id, cached_objects[cache_keys[object_id]]) for object_id in object_ids
                if cache_keys[object_id] in cached_objects)
def cache(func):
    # type: (FuncT) -> FuncT
    """Decorator which applies Django caching to a function.

    Uses a key based on the function's name, filename, and
    the repr() of its arguments."""
    func_uniqifier = '%s-%s' % (func.__code__.co_filename, func.__name__) # type: ignore # https://github.com/python/mypy/issues/1923
    @wraps(func)
    def keyfunc(*args, **kwargs):
        # type: (*Any, **Any) -> str
        raw_key = func_uniqifier + repr((args, kwargs))
        # memcached rejects keys containing spaces; '-' is doubled first so
        # the escaping stays unambiguous.
        return raw_key.replace('-', '--').replace(' ', '-s')
    return cache_with_key(keyfunc)(func)
def display_recipient_cache_key(recipient_id):
    # type: (int) -> Text
    """Cache key under which a Recipient's display_recipient is stored."""
    key = u"display_recipient_dict:%d" % (recipient_id,)
    return key
def user_profile_by_email_cache_key(email):
    # type: (Text) -> Text
    """Cache key for a UserProfile, looked up by (digested) email address."""
    # See the comment in zerver/lib/avatar_hash.py:gravatar_hash for why we
    # are proactively encoding email addresses even though they will
    # with high likelihood be ASCII-only for the foreseeable future.
    digest = make_safe_digest(email.strip())
    return u'user_profile_by_email:%s' % (digest,)
def user_profile_by_id_cache_key(user_profile_id):
    # type: (int) -> Text
    """Cache key for a UserProfile row, looked up by numeric id."""
    key = u"user_profile_by_id:%s" % (user_profile_id,)
    return key
# TODO: Refactor these cache helpers into another file that can import
# models.py so that python v3 style type annotations can also work.
# UserProfile columns included in the cached active-users-in-realm dicts;
# the invalidation logic in flush_user_profile checks this same list.
active_user_dict_fields = [
    'id', 'full_name', 'short_name', 'email',
    'avatar_source', 'avatar_version',
    'is_realm_admin', 'is_bot', 'timezone'] # type: List[str]
def active_user_dicts_in_realm_cache_key(realm):
    # type: (Realm) -> Text
    """Cache key for the list of active-user dicts of the given realm."""
    realm_id = realm.id
    return u"active_user_dicts_in_realm:%s" % (realm_id,)
# UserProfile columns included in the cached per-realm bot dicts; the
# invalidation logic in flush_user_profile checks this same list.
bot_dict_fields = ['id', 'full_name', 'short_name', 'email',
                   'is_active', 'default_sending_stream__name',
                   'default_events_register_stream__name',
                   'default_all_public_streams', 'api_key',
                   'bot_owner__email', 'avatar_source',
                   'avatar_version'] # type: List[str]
def bot_dicts_in_realm_cache_key(realm):
    # type: (Realm) -> Text
    """Cache key for the list of bot dicts of the given realm."""
    realm_id = realm.id
    return u"bot_dicts_in_realm:%s" % (realm_id,)
def get_stream_cache_key(stream_name, realm):
    # type: (Text, Union[Realm, int]) -> Text
    """Cache key for a Stream; accepts either a Realm object or a realm id."""
    from zerver.models import Realm
    realm_id = realm.id if isinstance(realm, Realm) else realm
    # Stream names are case-insensitive, so normalize before digesting.
    return u"stream_by_realm_and_name:%s:%s" % (
        realm_id, make_safe_digest(stream_name.strip().lower()))
def delete_user_profile_caches(user_profiles):
    # type: (Iterable[UserProfile]) -> None
    """Drop both the by-email and by-id cache entries for each profile."""
    keys = [key
            for profile in user_profiles
            for key in (user_profile_by_email_cache_key(profile.email),
                        user_profile_by_id_cache_key(profile.id))]
    cache_delete_many(keys)
def delete_display_recipient_cache(user_profile):
    # type: (UserProfile) -> None
    """Invalidate the display_recipient cache for every recipient the user subscribes to."""
    from zerver.models import Subscription # We need to import here to avoid cyclic dependency.
    recipient_ids = Subscription.objects.filter(
        user_profile=user_profile).values_list('recipient_id', flat=True)
    cache_delete_many(display_recipient_cache_key(rid) for rid in recipient_ids)
# Called by models.py to flush the user_profile cache whenever we save
# a user_profile object
def flush_user_profile(sender, **kwargs):
    # type: (Any, **Any) -> None
    """post_save handler: invalidate every cache derived from a UserProfile.

    kwargs['update_fields'] is None when we don't know which fields changed;
    in that case all dependent caches are conservatively flushed.
    """
    user_profile = kwargs['instance']
    delete_user_profile_caches([user_profile])

    def changed(fields):
        # type: (List[str]) -> bool
        # True if we can't tell what changed, or if any of `fields` changed.
        update_fields = kwargs.get('update_fields')
        if update_fields is None:
            return True
        return len(set(fields) & set(update_fields)) > 0

    # Invalidate our active_users_in_realm info dict if any user has changed
    # the fields in the dict or become (in)active
    if changed(active_user_dict_fields + ['is_active', 'email']):
        cache_delete(active_user_dicts_in_realm_cache_key(user_profile.realm))
    # Invalidate the display_recipient cache if the user's email changed.
    # (This previously checked the misspelled key 'updated_fields', which
    # never exists in the signal kwargs, so the cache was flushed on every
    # save regardless of which fields changed.)
    if changed(['email']):
        delete_display_recipient_cache(user_profile)
    # Invalidate our bots_in_realm info dict if any bot has
    # changed the fields in the dict or become (in)active
    if user_profile.is_bot and changed(bot_dict_fields):
        cache_delete(bot_dicts_in_realm_cache_key(user_profile.realm))
    # Invalidate realm-wide alert words cache if any user in the realm has changed
    # alert words
    if changed(['alert_words']):
        cache_delete(realm_alert_words_cache_key(user_profile.realm))
# Called by models.py to flush various caches whenever we save
# a Realm object. The main tricky thing here is that Realm info is
# generally cached indirectly through user_profile objects.
def flush_realm(sender, **kwargs):
    # type: (Any, **Any) -> None
    """post_save handler for Realm: flush caches keyed (directly or via users) on it."""
    realm = kwargs['instance']
    delete_user_profile_caches(realm.get_active_users())
    if realm.deactivated:
        # The realm-wide aggregate caches only need flushing when the realm
        # is shut down; the per-user entries were handled above.
        for key in (active_user_dicts_in_realm_cache_key(realm),
                    bot_dicts_in_realm_cache_key(realm),
                    realm_alert_words_cache_key(realm)):
            cache_delete(key)
def realm_alert_words_cache_key(realm):
    # type: (Realm) -> Text
    """Cache key for the realm-wide alert words dict, by realm string_id."""
    string_id = realm.string_id
    return u"realm_alert_words:%s" % (string_id,)
# Called by models.py to flush the stream cache whenever we save a stream
# object.
def flush_stream(sender, **kwargs):
    # type: (Any, **Any) -> None
    """post_save handler for Stream: refresh the stream cache and dependent bot caches."""
    from zerver.models import UserProfile
    stream = kwargs['instance']
    items_for_remote_cache = {}
    items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm)] = (stream,)
    cache_set_many(items_for_remote_cache)
    # The cached bot dicts embed default stream names, so they must be
    # flushed when the stream may have been renamed AND some bot uses it as
    # a default stream.  NOTE: the previous condition read `A or B and C`,
    # which Python parses as `A or (B and C)`; the intended grouping is
    # `(A or B) and C`, added here with explicit parentheses.
    if (kwargs.get('update_fields') is None or 'name' in kwargs['update_fields']) and \
            UserProfile.objects.filter(
                Q(default_sending_stream=stream) |
                Q(default_events_register_stream=stream)).exists():
        cache_delete(bot_dicts_in_realm_cache_key(stream.realm))
# TODO: Rename to_dict_cache_key_id and to_dict_cache_key
def to_dict_cache_key_id(message_id, apply_markdown):
    # type: (int, bool) -> Text
    """Cache key for a message's to_dict rendering; the bool renders as 0/1."""
    markdown_flag = int(apply_markdown)
    return u'message_dict:%d:%d' % (message_id, markdown_flag)
def to_dict_cache_key(message, apply_markdown):
    # type: (Message, bool) -> Text
    """Cache key for a Message object's to_dict rendering."""
    message_id = message.id
    return to_dict_cache_key_id(message_id, apply_markdown)
def flush_message(sender, **kwargs):
    # type: (Any, **Any) -> None
    """post_save handler for Message: drop both cached to_dict renderings."""
    message = kwargs['instance']
    for apply_markdown in (False, True):
        cache_delete(to_dict_cache_key(message, apply_markdown))
| {
"content_hash": "ab821ecf996333e53dfec77916133008",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 133,
"avg_line_length": 38.84282460136674,
"alnum_prop": 0.6407459535538353,
"repo_name": "SmartPeople/zulip",
"id": "0e0dc54c495683ca83d634c4a20e954451d84403",
"size": "17052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/lib/cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "354705"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "561615"
},
{
"name": "JavaScript",
"bytes": "1744158"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3789545"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37831"
}
],
"symlink_target": ""
} |
class Solution:
    def invertTree(self, root: TreeNode) -> TreeNode:
        """Return a mirrored copy of the binary tree rooted at `root`.

        Builds fresh TreeNode instances instead of mutating the input tree
        (a mutating in-place swap would also be accepted by LeetCode).
        """
        if root is None:
            return None
        mirrored_left = self.invertTree(root.right)
        mirrored_right = self.invertTree(root.left)
        return TreeNode(root.val, mirrored_left, mirrored_right)
| {
"content_hash": "b39a1505c8bd47d3ba2a06b5d17d1311",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 39.357142857142854,
"alnum_prop": 0.6225045372050817,
"repo_name": "zubie7a/Algorithms",
"id": "6386e8b75a250bacf4060ca1538e72afe4a58871",
"size": "955",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LeetCode/01_Easy/lc_226.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "281393"
},
{
"name": "Perl",
"bytes": "75318"
},
{
"name": "Python",
"bytes": "289075"
}
],
"symlink_target": ""
} |
"""llvm
Tool-specific initialization for LLVM
"""
#
# Copyright (c) 2009 VMware, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import os.path
import re
import sys
import distutils.version
import SCons.Errors
import SCons.Util
def generate(env):
env['llvm'] = False
try:
llvm_dir = os.environ['LLVM']
except KeyError:
# Do nothing -- use the system headers/libs
llvm_dir = None
else:
if not os.path.isdir(llvm_dir):
raise SCons.Errors.InternalError, "Specified LLVM directory not found"
if env['debug']:
llvm_subdir = 'Debug'
else:
llvm_subdir = 'Release'
llvm_bin_dir = os.path.join(llvm_dir, llvm_subdir, 'bin')
if not os.path.isdir(llvm_bin_dir):
llvm_bin_dir = os.path.join(llvm_dir, 'bin')
if not os.path.isdir(llvm_bin_dir):
raise SCons.Errors.InternalError, "LLVM binary directory not found"
env.PrependENVPath('PATH', llvm_bin_dir)
if env['platform'] == 'windows':
# XXX: There is no llvm-config on Windows, so assume a standard layout
if llvm_dir is None:
print 'scons: LLVM environment variable must be specified when building for windows'
return
# Try to determine the LLVM version from llvm/Config/config.h
llvm_config = os.path.join(llvm_dir, 'include/llvm/Config/config.h')
if not os.path.exists(llvm_config):
print 'scons: could not find %s' % llvm_config
return
llvm_version_re = re.compile(r'^#define PACKAGE_VERSION "([^"]*)"')
llvm_version = None
for line in open(llvm_config, 'rt'):
mo = llvm_version_re.match(line)
if mo:
llvm_version = mo.group(1)
llvm_version = distutils.version.LooseVersion(llvm_version)
break
if llvm_version is None:
print 'scons: could not determine the LLVM version from %s' % llvm_config
return
env.Prepend(CPPPATH = [os.path.join(llvm_dir, 'include')])
env.AppendUnique(CPPDEFINES = [
'__STDC_LIMIT_MACROS',
'__STDC_CONSTANT_MACROS',
'HAVE_STDINT_H',
])
env.Prepend(LIBPATH = [os.path.join(llvm_dir, 'lib')])
if llvm_version >= distutils.version.LooseVersion('2.9'):
# 2.9
env.Prepend(LIBS = [
'LLVMObject', 'LLVMMCJIT', 'LLVMMCDisassembler',
'LLVMLinker', 'LLVMipo', 'LLVMInterpreter',
'LLVMInstrumentation', 'LLVMJIT', 'LLVMExecutionEngine',
'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMMCParser', 'LLVMX86AsmPrinter', 'LLVMX86CodeGen',
'LLVMSelectionDAG', 'LLVMX86Utils', 'LLVMX86Info', 'LLVMAsmPrinter',
'LLVMCodeGen', 'LLVMScalarOpts', 'LLVMInstCombine',
'LLVMTransformUtils', 'LLVMipa', 'LLVMAsmParser',
'LLVMArchive', 'LLVMBitReader', 'LLVMAnalysis', 'LLVMTarget',
'LLVMCore', 'LLVMMC', 'LLVMSupport',
])
elif llvm_version >= distutils.version.LooseVersion('2.7'):
# 2.7
env.Prepend(LIBS = [
'LLVMLinker', 'LLVMipo', 'LLVMInterpreter',
'LLVMInstrumentation', 'LLVMJIT', 'LLVMExecutionEngine',
'LLVMBitWriter', 'LLVMX86Disassembler', 'LLVMX86AsmParser',
'LLVMMCParser', 'LLVMX86AsmPrinter', 'LLVMX86CodeGen',
'LLVMSelectionDAG', 'LLVMX86Info', 'LLVMAsmPrinter',
'LLVMCodeGen', 'LLVMScalarOpts', 'LLVMInstCombine',
'LLVMTransformUtils', 'LLVMipa', 'LLVMAsmParser',
'LLVMArchive', 'LLVMBitReader', 'LLVMAnalysis', 'LLVMTarget',
'LLVMMC', 'LLVMCore', 'LLVMSupport', 'LLVMSystem',
])
else:
# 2.6
env.Prepend(LIBS = [
'LLVMX86AsmParser', 'LLVMX86AsmPrinter', 'LLVMX86CodeGen',
'LLVMX86Info', 'LLVMLinker', 'LLVMipo', 'LLVMInterpreter',
'LLVMInstrumentation', 'LLVMJIT', 'LLVMExecutionEngine',
'LLVMDebugger', 'LLVMBitWriter', 'LLVMAsmParser',
'LLVMArchive', 'LLVMBitReader', 'LLVMSelectionDAG',
'LLVMAsmPrinter', 'LLVMCodeGen', 'LLVMScalarOpts',
'LLVMTransformUtils', 'LLVMipa', 'LLVMAnalysis',
'LLVMTarget', 'LLVMMC', 'LLVMCore', 'LLVMSupport',
'LLVMSystem',
])
env.Append(LIBS = [
'imagehlp',
'psapi',
'shell32',
'advapi32'
])
if env['msvc']:
# Some of the LLVM C headers use the inline keyword without
# defining it.
env.Append(CPPDEFINES = [('inline', '__inline')])
if env['build'] in ('debug', 'checked'):
# LLVM libraries are static, build with /MT, and they
# automatically link agains LIBCMT. When we're doing a
# debug build we'll be linking against LIBCMTD, so disable
# that.
env.Append(LINKFLAGS = ['/nodefaultlib:LIBCMT'])
else:
if not env.Detect('llvm-config'):
print 'scons: llvm-config script not found' % llvm_version
return
llvm_version = env.backtick('llvm-config --version').rstrip()
llvm_version = distutils.version.LooseVersion(llvm_version)
try:
# Treat --cppflags specially to prevent NDEBUG from disabling
# assertion failures in debug builds.
cppflags = env.ParseFlags('!llvm-config --cppflags')
try:
cppflags['CPPDEFINES'].remove('NDEBUG')
except ValueError:
pass
env.MergeFlags(cppflags)
env.ParseConfig('llvm-config --libs')
env.ParseConfig('llvm-config --ldflags')
except OSError:
print 'scons: llvm-config version %s failed' % llvm_version
return
assert llvm_version is not None
env['llvm'] = True
print 'scons: Found LLVM version %s' % llvm_version
env['LLVM_VERSION'] = llvm_version
# Define HAVE_LLVM macro with the major/minor version number (e.g., 0x0206 for 2.6)
llvm_version_major = int(llvm_version.version[0])
llvm_version_minor = int(llvm_version.version[1])
llvm_version_hex = '0x%02x%02x' % (llvm_version_major, llvm_version_minor)
env.Prepend(CPPDEFINES = [('HAVE_LLVM', llvm_version_hex)])
def exists(env):
    # SCons tool protocol hook: report whether this tool can be loaded (always yes;
    # generate() itself degrades gracefully when LLVM is missing).
    return True
# vim:set ts=4 sw=4 et:
| {
"content_hash": "d9354d11cffc9584b9e9010602ea56ab",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 96,
"avg_line_length": 40.53157894736842,
"alnum_prop": 0.5987534086482275,
"repo_name": "gzorin/RSXGL",
"id": "57fe922d0ae65e3ed65b57ad7c52b8a39978b911",
"size": "7701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extsrc/mesa/scons/llvm.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "1210761"
},
{
"name": "C",
"bytes": "35145128"
},
{
"name": "C++",
"bytes": "33587085"
},
{
"name": "CSS",
"bytes": "16957"
},
{
"name": "Emacs Lisp",
"bytes": "74"
},
{
"name": "FORTRAN",
"bytes": "1377222"
},
{
"name": "Objective-C",
"bytes": "146655"
},
{
"name": "Perl",
"bytes": "361"
},
{
"name": "Python",
"bytes": "668613"
},
{
"name": "Shell",
"bytes": "70500"
},
{
"name": "XSLT",
"bytes": "4325"
}
],
"symlink_target": ""
} |
from os.path import splitext
import common
import logging
import sys
logger = logging.getLogger(__name__)
# Library-style logging defaults: stay silent unless the application installs
# a handler, and don't bubble records up to the root logger.
logger.addHandler(logging.NullHandler())
logger.propagate = False
logger.setLevel(logging.INFO)
# inputs are files of gzip format
def main(inputs, prefix=None):
    """Concatenate gzipped input files into a single gzipped pool file.

    Returns {"pooled": <output filename>}.  The output name is either
    prefix + "_pooled<ext>.gz" or the '-'-joined input basenames.
    """
    # Inner extension of the last input (e.g. ".bed" from "x.bed.gz");
    # presumably all inputs share the same one.
    extension = splitext(splitext(inputs[-1])[0])[1]
    suffix = "_pooled%s.gz" % (extension)
    if prefix:
        pooled_filename = prefix + suffix
    else:
        basenames = [splitext(splitext(fn)[0])[0] for fn in inputs]
        pooled_filename = '-'.join(basenames) + suffix
    # Decompress all inputs in order and recompress into one stream.
    out, err = common.run_pipe([
        'gzip -dc %s' % (' '.join(inputs)),
        'gzip -cn'],
        outfile=pooled_filename)
    return {"pooled": pooled_filename}
if __name__ == '__main__':
    # Usage: pool.py FILE1 FILE2 [PREFIX]
    # The previous check (`len(sys.argv) == 3`, else index sys.argv[3])
    # crashed with an IndexError when fewer than three arguments were
    # given; test for the optional prefix's presence instead.
    if len(sys.argv) > 3:
        main(sys.argv[1:3], prefix=sys.argv[3])
    else:
        main(sys.argv[1:3])
| {
"content_hash": "07a0ed62fa14d0cd3271d3d839930f1d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 109,
"avg_line_length": 24.609756097560975,
"alnum_prop": 0.6154608523290387,
"repo_name": "ENCODE-DCC/pipeline-container",
"id": "9d2a1df571f06cd563e24b42c074551805559f16",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "138701"
},
{
"name": "Python",
"bytes": "313714"
},
{
"name": "R",
"bytes": "75661"
},
{
"name": "Roff",
"bytes": "33540"
},
{
"name": "Shell",
"bytes": "2898"
}
],
"symlink_target": ""
} |
'''
Created on Oct 2, 2013
@author: olehlong
'''
from client.GLChat import GLChatView
from nc.TreeParityMachine import TreeParityMachine, create_vector, TPMManager
import logging
def main():
    """Configure debug logging, then launch the GL chat client UI."""
    logging.basicConfig(level=logging.DEBUG, format='%(levelname)-8s %(message)s')
    view = GLChatView()
    view.start_client()
    view.main()
if __name__ == '__main__':
    main()
| {
"content_hash": "7d0b045d5beb10f1b2e35a8bb8dac307",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 83,
"avg_line_length": 19.7,
"alnum_prop": 0.6725888324873096,
"repo_name": "olehlong/xmpp-neural-cryptography",
"id": "a2591a16e35943293bee4ee3920277b6f9999b51",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72514"
}
],
"symlink_target": ""
} |
import logging
from airavata_sdk.clients.tenant_profile_client import TenantProfileClient
from airavata_sdk.clients.keycloak_token_fetcher import Authenticator
from airavata.api.error.ttypes import TException
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Obtain an OAuth token via the Keycloak password grant; the sample
# credentials below target the "default" tenant/gateway.
authenticator = Authenticator();
token = authenticator.get_token_and_user_info_password_flow("default-admin", "123456", "default")
# load GroupManagerClient with default configuration
#client = TenantProfileClient()
# load client with given configuration file (e.g customized_settings.ini)
client = TenantProfileClient('../transport/settings.ini')
def get_all_gateways():
    """Fetch and print every gateway visible to the authenticated tenant."""
    try:
        gateways = client.get_all_gateways(token)
        print("Gateways ", gateways)
    except TException:
        logger.exception("Error occurred")
def is_gateway_exsist():
    """Check and print whether the 'default' gateway exists.

    (The misspelled function name is preserved for caller compatibility.)
    """
    try:
        exists_flag = client.is_gateway_exist(token, "default")
        print("Gateways ", exists_flag)
    except TException:
        logger.exception("Error occurred")
| {
"content_hash": "4693638562f5d7c4feb5b6edbdf12db8",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 97,
"avg_line_length": 27.45945945945946,
"alnum_prop": 0.7332677165354331,
"repo_name": "apache/airavata",
"id": "a1b56128d696701e296f1b5ae9facc217203c589",
"size": "1812",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airavata-api/airavata-client-sdks/airavata-python-sdk/airavata_sdk/samples/tenant_profile_client_samples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2959"
},
{
"name": "Blade",
"bytes": "640580"
},
{
"name": "C",
"bytes": "29278"
},
{
"name": "C++",
"bytes": "8274651"
},
{
"name": "CSS",
"bytes": "34425"
},
{
"name": "Dockerfile",
"bytes": "7386"
},
{
"name": "HTML",
"bytes": "91922"
},
{
"name": "Java",
"bytes": "36030164"
},
{
"name": "JavaScript",
"bytes": "404261"
},
{
"name": "Jinja",
"bytes": "234378"
},
{
"name": "PHP",
"bytes": "1176284"
},
{
"name": "Python",
"bytes": "633278"
},
{
"name": "Shell",
"bytes": "153797"
},
{
"name": "Thrift",
"bytes": "472909"
},
{
"name": "XSLT",
"bytes": "3266"
}
],
"symlink_target": ""
} |
from linot.Plugins.TwitchNotifier.TwitchEngine import TwitchEngine
from nose.tools import ok_
import requests
# Base URL of the (legacy v3 "kraken") Twitch REST API used by these tests.
TWITCH_REST = 'https://api.twitch.tv/kraken'
class TestTwitchEngine:
    """Live integration checks for TwitchEngine against the Twitch kraken API."""
    def setUp(self):
        self.twitch = TwitchEngine()
    def testGetChannels(self):
        channels = self.twitch.getChannels()
        # More than one API page of follows, so the multi-get path is exercised.
        ok_(len(channels) > 25)
        # Channel names must be unique.
        ok_(len(set(channels)) == len(channels))
        for name in channels:
            expect_url = 'http://www.twitch.tv/' + name.lower()
            actual_url = channels[name]['url']
            ok_(actual_url == expect_url,
                '{} <-> {}'.format(actual_url, expect_url))
    def testGetLiveChannels(self):
        # This is a tricky one, not sure how to properly test it..
        sample_size = 10
        live_channels = self.twitch.getLiveChannels()
        checked = 0
        failures = 0
        for name in live_channels:
            payload = requests.get(TWITCH_REST + '/streams/' + name).json()
            try:
                ok_(payload['stream']['channel']['display_name'] == name)
            except KeyError:
                failures += 1
            checked += 1
            if checked >= sample_size:
                break
        # Tolerate some churn: channels can go offline between the two calls.
        ok_((float(failures) / checked) < 0.20,
            'test:{}, error:{}'.format(checked, failures))
    def testFollowUnfollowChannel(self):
        # Start from a known state: not following 'kaydada'.
        self.twitch.unfollowChannel('kaydada')
        ok_('KayDaDa' not in self.twitch.getChannels())
        # Follow, verify, then unfollow and verify again.
        self.twitch.followChannel('kaydada')
        ok_('KayDaDa' in self.twitch.getChannels())
        ok_(self.twitch.unfollowChannel('kaydada') is True)
        ok_('KayDaDa' not in self.twitch.getChannels())
        # Operations on a nonexistent channel must report failure.
        name, followed = self.twitch.followChannel('kaydada2')
        ok_(followed is False)
        ok_(self.twitch.unfollowChannel('kaydada2') is False)
        # Leave the account following the channel, as before the test.
        name, followed = self.twitch.followChannel('kaydada')
        ok_(followed is True)
| {
"content_hash": "78a4100c997cfaf00bfd03942d9d7327",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 106,
"avg_line_length": 40.05555555555556,
"alnum_prop": 0.6079519186315303,
"repo_name": "hpeter/Linot",
"id": "bc0bc58586028ef2671c0f4f2bdbaefe9c0c0fcd",
"size": "2163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_temp/TwitchNotifier/TestTwitchEngine.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "75746"
}
],
"symlink_target": ""
} |
import datetime
import os
import re
import shlex
import subprocess
import unittest.mock as mock
import pytest
import marge.git
from marge.git import GIT_SSH_COMMAND
# pylint: disable=attribute-defined-outside-init
@mock.patch('marge.git._run')
class TestRepo:
    def setup_method(self, _method):
        # Fresh Repo per test: no SSH key, tiny timeout, no reference repo.
        self.repo = marge.git.Repo(
            remote_url='ssh://git@git.foo.com/some/repo.git',
            local_path='/tmp/local/path',
            ssh_key_file=None,
            timeout=datetime.timedelta(seconds=1),
            reference=None,
        )
def test_clone(self, mocked_run):
self.repo.clone()
assert get_calls(mocked_run) == [
'git clone --origin=origin ssh://git@git.foo.com/some/repo.git /tmp/local/path',
]
def test_config_user_info(self, mocked_run):
self.repo.config_user_info('bart', 'bart.simpson@gmail.com')
assert get_calls(mocked_run) == [
'git -C /tmp/local/path config user.email bart.simpson@gmail.com',
'git -C /tmp/local/path config user.name bart',
]
def test_rebase_success(self, mocked_run):
self.repo.rebase('feature_branch', 'master_of_the_universe')
assert get_calls(mocked_run) == [
'git -C /tmp/local/path fetch --prune origin',
'git -C /tmp/local/path checkout -B feature_branch origin/feature_branch --',
'git -C /tmp/local/path rebase origin/master_of_the_universe',
'git -C /tmp/local/path rev-parse HEAD'
]
def test_merge_success(self, mocked_run):
self.repo.merge('feature_branch', 'master_of_the_universe')
assert get_calls(mocked_run) == [
'git -C /tmp/local/path fetch --prune origin',
'git -C /tmp/local/path checkout -B feature_branch origin/feature_branch --',
'git -C /tmp/local/path merge origin/master_of_the_universe',
'git -C /tmp/local/path rev-parse HEAD'
]
    def test_reviewer_tagging_success(self, mocked_run):
        # tag_with_trailer() rewrites the commits between start_commit and the
        # branch tip via `git filter-branch`, then reads the resulting HEAD sha.
        self.repo.tag_with_trailer(
            trailer_name='Reviewed-by',
            trailer_values=['John Simon <john@invalid>'],
            branch='feature_branch',
            start_commit='origin/master_of_the_universe',
        )
        rewrite, parse = get_calls(mocked_run)
        # The filter-branch invocation must embed the trailer value and the
        # commit range; the msg-filter script in between is matched loosely.
        pattern = ''.join([
            'git -C /tmp/local/path filter-branch --force ',
            '--msg-filter.*John Simon <john@invalid>.*origin/master_of_the_universe..feature_branch',
        ])
        assert re.match(pattern, rewrite)
        assert parse == 'git -C /tmp/local/path rev-parse HEAD'
    def test_reviewer_tagging_failure(self, mocked_run):
        # Simulate `git filter-branch` failing while rev-parse/reset still
        # succeed, so the implementation can roll the branch back.
        def fail_on_filter_branch(*args, **unused_kwargs):
            if 'filter-branch' in args:
                raise subprocess.CalledProcessError(returncode=1, cmd='git rebase blah')
            if 'rev-parse' in args or 'reset' in args:
                return mock.Mock()
            raise Exception('Unexpected call:', args)
        mocked_run.side_effect = fail_on_filter_branch
        try:
            self.repo.tag_with_trailer(
                trailer_name='Reviewed-by',
                branch='feature_branch',
                start_commit='origin/master_of_the_universe',
                trailer_values=['John Simon <john@invalid.com>']
            )
        except marge.git.GitError:
            pass
        else:
            assert False
        # The failed rewrite must be detected and rolled back to the
        # pre-filter-branch state kept under refs/original/.
        rewrite, check, abort = get_calls(mocked_run)
        assert 'filter-branch' in rewrite
        assert check == 'git -C /tmp/local/path rev-parse refs/original/refs/heads/'
        assert abort == 'git -C /tmp/local/path reset --hard refs/original/refs/heads/feature_branch'
def test_rebase_same_branch(self, mocked_run):
with pytest.raises(AssertionError):
self.repo.rebase('branch', 'branch')
assert get_calls(mocked_run) == []
def test_merge_same_branch(self, mocked_run):
with pytest.raises(AssertionError):
self.repo.merge('branch', 'branch')
assert get_calls(mocked_run) == []
def test_remove_branch(self, mocked_run):
self.repo.remove_branch('some_branch', new_current_branch='devel')
assert get_calls(mocked_run) == [
'git -C /tmp/local/path branch -D some_branch',
]
def test_remove_branch_default(self, mocked_run):
self.repo.remove_branch('some_branch')
assert get_calls(mocked_run) == [
'git -C /tmp/local/path branch -D some_branch',
]
def test_remove_master_branch_fails(self, unused_mocked_run):
with pytest.raises(AssertionError):
self.repo.remove_branch('meister', new_current_branch='meister')
def test_push_force(self, mocked_run):
mocked_run.return_value = mocked_stdout(b'')
self.repo.push('my_branch', force=True)
assert get_calls(mocked_run) == [
'git -C /tmp/local/path checkout my_branch --',
'git -C /tmp/local/path diff-index --quiet HEAD',
'git -C /tmp/local/path ls-files --others',
'git -C /tmp/local/path push --force origin my_branch:my_branch',
]
def test_push_force_fails_on_dirty(self, mocked_run):
def fail_on_diff_index(*args, **unused_kwargs):
if 'diff-index' in args:
raise subprocess.CalledProcessError(returncode=1, cmd='git diff-index blah')
mocked_run.side_effect = fail_on_diff_index
with pytest.raises(marge.git.GitError):
self.repo.push('my_branch', force=True)
assert get_calls(mocked_run) == [
'git -C /tmp/local/path checkout my_branch --',
'git -C /tmp/local/path diff-index --quiet HEAD',
]
def test_push_force_fails_on_untracked(self, mocked_run):
def fail_on_ls_files(*args, **unused_kwargs):
if 'ls-files' in args:
return mocked_stdout('some_file.txt\nanother_file.py')
return None
mocked_run.side_effect = fail_on_ls_files
with pytest.raises(marge.git.GitError):
self.repo.push('my_branch', force=True)
assert get_calls(mocked_run) == [
'git -C /tmp/local/path checkout my_branch --',
'git -C /tmp/local/path diff-index --quiet HEAD',
'git -C /tmp/local/path ls-files --others',
]
def test_get_commit_hash(self, mocked_run):
mocked_run.return_value = mocked_stdout(b'deadbeef')
commit_hash = self.repo.get_commit_hash()
assert commit_hash == 'deadbeef'
assert get_calls(mocked_run) == [
'git -C /tmp/local/path rev-parse HEAD',
]
self.repo.get_commit_hash(rev='master')
assert get_calls(mocked_run)[-1] == 'git -C /tmp/local/path rev-parse master'
def test_passes_ssh_key(self, mocked_run):
repo = self.repo._replace(ssh_key_file='/foo/id_rsa')
repo.config_user_info('bart', 'bart@gmail.com')
git_ssh = "GIT_SSH_COMMAND='%s -F /dev/null -o IdentitiesOnly=yes -i /foo/id_rsa'" % (
GIT_SSH_COMMAND,
)
assert get_calls(mocked_run) == [
'%s git -C /tmp/local/path config user.email bart@gmail.com' % git_ssh,
'%s git -C /tmp/local/path config user.name bart' % git_ssh,
]
def test_passes_reference_repo(self, mocked_run):
repo = self.repo._replace(reference='/foo/reference_repo')
repo.clone()
assert get_calls(mocked_run) == [
'git clone --origin=origin --reference=/foo/reference_repo ssh://git@git.foo.com/some/repo.git ' +
'/tmp/local/path',
]
def get_calls(mocked_run):
    """Render every call recorded on the mocked _run as a bash-style command string."""
    rendered = []
    for recorded_call in mocked_run.call_args_list:
        rendered.append(bashify(recorded_call))
    return rendered
def bashify(call):
    """Render a mock call tuple (args, kwargs) as the bash command line it represents.

    Environment variables from kwargs['env'] that differ from the current
    os.environ are rendered as quoted KEY=VALUE prefixes. They are emitted in
    sorted order: the original iterated a raw set difference, whose order is
    nondeterministic, which could make assertions flaky with multi-entry envs.
    """
    args, kwargs = call
    args = [shlex.quote(arg) for arg in args]
    env = kwargs.get('env') or {}
    # sorted() makes the prefix order deterministic run-to-run
    extra_env = sorted(set(env.items()) - set(os.environ.items()))
    alt_env = [shlex.quote(k) + '=' + shlex.quote(v) for k, v in extra_env]
    return ' '.join(alt_env + args)
def mocked_stdout(stdout):
    """Build a successful CompletedProcess carrying *stdout*, as subprocess.run would."""
    return subprocess.CompletedProcess(
        args=['blah', 'args'],
        returncode=0,
        stdout=stdout,
        stderr=None,
    )
def _filter_test(message, trailer_name, trailer_values):
    """Run the real msg-filter shell script over *message* and return its output."""
    script = marge.git._filter_branch_script(trailer_name, trailer_values)  # pylint: disable=protected-access
    encoded_script = script.encode('utf-8')
    encoded_message = message.encode('utf-8')
    output = subprocess.check_output(
        [b'sh', b'-c', encoded_script],
        input=encoded_message,
        stderr=subprocess.STDOUT,
    )
    return output.decode('utf-8')
def test_filter():
    """End-to-end checks of the trailer-appending msg-filter script."""
    # no trailers to add: the message passes through, normalised to end in '\n'
    assert _filter_test('Some Stuff', 'Tested-by', []) == 'Some Stuff\n'
    assert _filter_test('Some Stuff\n', 'Tested-by', []) == 'Some Stuff\n'
    # a new trailer is appended after the message body
    assert _filter_test('Some Stuff', 'Tested-by', ['T. Estes <testes@example.com>']) == '''Some Stuff
Tested-by: T. Estes <testes@example.com>
'''
    test_commit_message = r'''Fix: bug in BLah.
Some stuff.
Some More stuff (really? Yeah: really!)
Reviewed-by: R. Viewer <rviewer@example.com>
Reviewed-by: R. Viewer <rviewer@example.com>
Signed-off-by: Stephen Offer <soffer@example.com>
'''
    # note: the duplicated Reviewed-by trailer collapses to a single line
    with_tested_by = _filter_test(test_commit_message, 'Tested-by', ['T. Estes <testes@example.com>'])
    assert with_tested_by == '''Fix: bug in BLah.
Some stuff.
Some More stuff (really? Yeah: really!)
Reviewed-by: R. Viewer <rviewer@example.com>
Signed-off-by: Stephen Offer <soffer@example.com>
Tested-by: T. Estes <testes@example.com>
'''
    # setting Reviewed-by replaces the existing Reviewed-by trailers and
    # moves them to the end, preserving the other trailers
    with_new_reviewed_by = _filter_test(with_tested_by, 'Reviewed-by', [
        'Roger Ebert <ebert@example.com>', 'John Simon <simon@example.com>'
    ])
    assert with_new_reviewed_by == '''Fix: bug in BLah.
Some stuff.
Some More stuff (really? Yeah: really!)
Signed-off-by: Stephen Offer <soffer@example.com>
Tested-by: T. Estes <testes@example.com>
Reviewed-by: Roger Ebert <ebert@example.com>
Reviewed-by: John Simon <simon@example.com>
'''
    # a 'Word:' subject line must not be mistaken for a trailer
    assert _filter_test('Test: frobnificator', 'Tested-by', []) == 'Test: frobnificator\n'
    assert _filter_test('Test: frobnificator', 'Tested-by', ['T. Estes <testes@example.com>']) == (
        '''Test: frobnificator
Tested-by: T. Estes <testes@example.com>
'''
    )
def test_filter_fails_on_empty_commit_messages():
    """An entirely empty commit message must be rejected by the filter script."""
    expected = b'ERROR: Expected a non-empty commit message'
    with pytest.raises(subprocess.CalledProcessError) as caught:
        _filter_test('', '', [])
    assert caught.value.output == expected
def test_filter_fails_on_commit_messages_that_are_empty_apart_from_trailers():
    """A message consisting only of trailers (or of nothing) must be rejected."""
    person = 'T. Estes <testes@example.com>'
    with pytest.raises(subprocess.CalledProcessError) as caught:
        _filter_test('Tested-by: ' + person, 'Tested-by', [person])
    assert caught.value.output == (
        b'ERROR: Your commit message seems to consist only of '
        b'Trailers: Tested-by: T. Estes <testes@example.com>'
    )
    with pytest.raises(subprocess.CalledProcessError) as caught:
        _filter_test('', 'Tested-by', [person])
    assert caught.value.output == b'ERROR: Expected a non-empty commit message'
def test_filter_ignore_first_line_trailer_in_commit_message_if_not_set():
    """A trailer on the first (subject) line is kept as the subject, with the
    new trailer appended after it."""
    result = _filter_test(
        'Tested-by: T. Estes <testes@example.com>',
        'Reviewed-by', [
            'John Simon <john@invalid>',
        ],
    )
    assert result == '''Tested-by: T. Estes <testes@example.com>
Reviewed-by: John Simon <john@invalid>
'''
| {
"content_hash": "3f83d7bb19aa20f108831d9ce9607247",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 110,
"avg_line_length": 36.851612903225806,
"alnum_prop": 0.6100315126050421,
"repo_name": "smarkets/marge-bot",
"id": "c41b71feffe4b385d0e32266d9100e2b13f7f39c",
"size": "11424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_git.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1615"
},
{
"name": "Nix",
"bytes": "35051"
},
{
"name": "Python",
"bytes": "228977"
}
],
"symlink_target": ""
} |
import os
import errno
import time
import datetime
import re
import socket
import imp
from fabric.api import task, warn, put, puts, get, local, run, execute, \
settings, abort, hosts, env, runs_once, parallel, hide
import config
from internalutil import _list, mkdir_p, valid_dir
from hostint import get_address_pair
from clockoffset import adjust_timestamps, DATA_CORRECTED_FILE_EXT
from filefinder import get_testid_file_list
from flowcache import append_flow_cache, lookup_flow_cache
from sourcefilter import SourceFilter
#############################################################################
# Flow sorting functions
#############################################################################
## Compare low keys by flow source port (lowest source port first)
# @param x Flow key of the form something_<src_ip>_<src_port>_<dst_ip>_<dst_port>
# @param y Flow key of the form something_<src_ip>_<src_port>_<dst_ip>_<dst_port>
def _cmp_src_port(x, y):
"Compare flow keys by flow source port (lowest source port first)"
xflow = str(x)
yflow = str(y)
# split into src/dst IP/port
xflow_arr = xflow.split('_')
xflow_arr = xflow_arr[len(xflow_arr)-4:len(xflow_arr)]
yflow_arr = yflow.split('_')
yflow_arr = yflow_arr[len(yflow_arr)-4:len(yflow_arr)]
# sort by numeric source port
return cmp(int(xflow_arr[1]), int(yflow_arr[1]))
## Compare flow keys by flow dest port (lowest dest port first)
# @param x Flow key of the form something_<src_ip>_<src_port>_<dst_ip>_<dst_port>
# @param y Flow key of the form something_<src_ip>_<src_port>_<dst_ip>_<dst_port>
def _cmp_dst_port(x, y):
"Compare flow keys by flow dest port (lowest dest port first)"
xflow = str(x)
yflow = str(y)
# split into src/dst IP/port
xflow_arr = xflow.split('_')
xflow_arr = xflow_arr[len(xflow_arr)-4:len(xflow_arr)]
yflow_arr = yflow.split('_')
yflow_arr = yflow_arr[len(yflow_arr)-4:len(yflow_arr)]
# sort by numeric dest port
return cmp(int(xflow_arr[3]), int(yflow_arr[3]))
## Sort flow keys
## If all flows are bidirectional, sort so that server-client flows appear
## at left and client-server flows at right. Otherwise we always have
## server-client flow followed by client-server flow (if the latter exists)
# @param files Name to file name map
# @param source_filter Source filter
# @return List of sorted (flow_name, file_name) tuples
def sort_by_flowkeys(files={}, source_filter=''):
    """Sort flow names.

    If all flows are bidirectional, order them so server-to-client flows
    appear at the left and client-to-server flows at the right; otherwise
    each server-to-client flow is directly followed by the matching
    client-to-server flow (if one exists). When a source filter string is
    given, flows are emitted in filter order instead.

    NOTE(review): sorted(..., cmp=...) below and the list index computed
    with len(files) / 2 are Python 2 only — this module predates Python 3.
    """
    sorted_files = []
    # convert source_filter string into list of source filters
    source_filter_list = []
    if source_filter != '':
        for fil in source_filter.split(';'):
            fil = fil.strip()
            source_filter_list.append(fil)
    #
    # 1. if filter string was specified graph in order of filters
    #
    if len(source_filter_list) > 0:
        for fil in source_filter_list:
            # strip of the (S|D) part a the start
            arr = fil.split('_')
            if arr[2] == '*':
                fil = arr[1] + '_'
            else:
                fil = arr[1] + '_' + arr[2]
            # find the file entries that matches the filter
            # then alphabetically sort file names for each filter
            # before adding to return array. note we sort the reversed
            # file names, so order is determined by flow tuple which is
            # at the end of the names ([::-1] reverses the string)
            # make sure we only add entry if it is not in the list yet
            tmp = []
            for name in files:
                if fil in name and (name, files[name]) not in tmp and \
                        (name, files[name]) not in sorted_files:
                    tmp.append((name, files[name]))
            sorted_files.extend(sorted(tmp, key=lambda x: x[1][::-1]))
        return sorted_files
    #
    # 2. otherwise do our best to make sure we have a sensible and consistent
    # ordering based on server ports
    rev_files = {}
    # sort by dest port if and only if dest port is always lower than source
    # port
    cmp_fct = _cmp_dst_port
    for name in files:
        a = name.split('_')
        a = a[len(a)-4:len(a)]
        if int(a[1]) < int(a[3]):
            cmp_fct = _cmp_src_port
            break
    # emit each flow followed by its reverse flow (if present), so flow
    # pairs stay adjacent
    for name in sorted(files, cmp=cmp_fct):
        # print(name)
        if rev_files.get(name, '') == '':
            sorted_files.append((name, files[name]))
            a = name.split('_')
            a = a[len(a)-4:len(a)]
            # reverse flow key: swap the (ip, port) endpoint pairs
            rev_name = a[2] + '_' + a[3] + '_' + a[0] + '_' + a[1]
            if files.get(rev_name, '') != '':
                sorted_files.append((rev_name, files[rev_name]))
                rev_files[rev_name] = files[rev_name]
    # all flows bidirectional <=> exactly half of the entries were reverses
    if len(rev_files) == len(files) / 2:
        # order them so that server-client are left and client-server are right
        # in plot
        sorted_files_c2sleft = [('', '')] * len(files)
        idx = 0
        for name, file_name in sorted_files:
            if idx % 2 == 0:
                sorted_files_c2sleft[int(idx / 2)] = (name, file_name)
            else:
                sorted_files_c2sleft[
                    int((idx - 1) / 2) + len(files) / 2] = (name, file_name)
            idx += 1
        return sorted_files_c2sleft
    else:
        return sorted_files
## Sort flow keys by group ID
## If we have groups make sure that group order is the same for all flows
# @param files (flow name, file name) tuples (sorted by sort_by_flowkeys)
# @param groups File name to group number map
# @return List of sorted (flow_name, file_name) tuples
def sort_by_group_id(files={}, groups={}):
    """Interleave (flow_name, file_name) tuples so that every run of
    num_groups consecutive slots holds one entry per group id, keeping
    the group order identical across all experiments.

    With a single group the input is returned untouched.
    """
    num_groups = max(groups.values())
    if num_groups == 1:
        return files
    ordered = [('', '')] * len(files)
    for position, entry in enumerate(files):
        # each block of num_groups slots is filled by group id (1-based)
        block_start = int(position / num_groups) * num_groups
        ordered[block_start + groups[entry[1]] - 1] = entry
    return ordered
## Sort flow keys by group ID
## like sort_by_group_id() function, but the tuples in files are (string,list) instead
# of (string, string). Assumption: all files in one list belong to the same group!
# @param files (flow name, file name) tuples (sorted by sort_by_flowkeys)
# @param groups File name to group number map
# @return List of sorted (flow_name, file_name) tuples
def sort_by_group_id2(files={}, groups={}):
    """Like sort_by_group_id(), but each tuple is (name, list_of_files).

    All files in one list are assumed to belong to the same group, so the
    group id of the first file in the list decides the slot.
    """
    num_groups = max(groups.values())
    if num_groups == 1:
        return files
    ordered = [('', [])] * len(files)
    for position, entry in enumerate(files):
        # each block of num_groups slots is filled by group id (1-based)
        block_start = int(position / num_groups) * num_groups
        ordered[block_start + groups[entry[1][0]] - 1] = entry
    return ordered
###########################################################################
# Helper functions
###########################################################################
## Figure out directory for output files and create if it doesn't exist
## If out_dir is a relative path, the actual out_dir will be the directory where
## the file fname is concatenated with out_dir. If out_dir is an absolute path
## then the final out_dir will be out_dir.
# @param fname Path name of file
# @param out_dir Output directory supplied by user
# @return Path name
def get_out_dir(fname, out_dir):
    """Resolve the output directory for *fname* and create it if needed.

    A relative (or empty) *out_dir* is anchored at the directory containing
    *fname*; an absolute one is used as given. The returned path always
    ends in '/'.
    """
    if out_dir == '' or out_dir[0] != '/':
        # relative out_dir: anchor it at the directory containing fname
        out_dir = os.path.dirname(fname) + '/' + out_dir
    if len(out_dir) > 0 and out_dir[-1] != '/':
        out_dir += '/'
    mkdir_p(out_dir)
    return out_dir
#############################################################################
# Plot functions
#############################################################################
## Plot time series
# @param title Title of plot at the top
# @param files Dictionary with legend names (keys) and files with the data
# to plot (values)
# @param ylab Label for y-axis
# @param yindex Index of the column in data file to plot
# @param yscaler Scaler for y-values (data in file is multiplied with the scaler)
# @param otype Type of output file
# @param oprefix Output file name prefix
# @param pdf_dir Output directory for graphs
# @param sep Character that separates columns in data file
# @param aggr Aggregation of data in time intervals
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds
# (by default 0.0 = end of experiment)
# @param groups Map data files to groups (all files of same experiment must have
# same group number)
# @param sort_flowkey '1' sort by flow key (default)
# '0' don't sort by flow key
# @param boxplot '0' normal time series
# '1' do boxplot for all values at one point in time
# @param plot_params Parameters passed to plot function via environment variables
# @param plot_script Specify the script used for plotting, must specify full path
# (default is config.TPCONF_script_path/plot_time_series.R)
# @param source_filter Source filter
def plot_time_series(title='', files={}, ylab='', yindex=2, yscaler=1.0, otype='',
                     oprefix='', pdf_dir='', sep=' ', aggr='', omit_const='0',
                     ymin=0, ymax=0, lnames='',
                     stime='0.0', etime='0.0', groups={}, sort_flowkey='1',
                     boxplot='', plot_params='', plot_script='', source_filter=''):
    """Plot time series data by invoking an external R plot script.

    Sorts the data files (by flow key and/or group id), validates the
    legend names, resolves the output directory and then shells out to
    plot_time_series.R, passing all plot settings via environment
    variables (documented in the comment block before the local() call).
    """
    file_names = []
    leg_names = []
    _groups = []
    #print(files)
    if sort_flowkey == '1':
        sorted_files = sort_by_flowkeys(files, source_filter)
    else:
        sorted_files = files.items()
    # debug prints disabled: they cluttered the fab output unconditionally
    # (every sibling function keeps them commented out)
    #print(sorted_files)
    sorted_files = sort_by_group_id(sorted_files, groups)
    #print(sorted_files)
    for name, file_name in sorted_files:
        leg_names.append(name)
        file_names.append(file_name)
        _groups.append(groups[file_name])
    if lnames != '':
        lname_arr = lnames.split(';')
        if boxplot == '0' and len(lname_arr) != len(leg_names):
            abort(
                'Number of legend names must be the same as the number of flows')
        else:
            leg_names = lname_arr
    # get the directory name here if not specified
    if pdf_dir == '':
        pdf_dir = os.path.dirname(file_names[0]) + '/'
    else:
        pdf_dir = valid_dir(pdf_dir)
        # if not absolute dir, make it relative to experiment_dir
        # assume experiment dir is part before first slash
        if pdf_dir[0] != '/':
            pdf_dir = file_names[0].split('/')[0] + '/' + pdf_dir
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)
    if plot_script == '':
        plot_script = 'R CMD BATCH --vanilla %s/plot_time_series.R' % \
                      config.TPCONF_script_path
    # interface between this code and the plot function are environment variables
    # the following variables are passed to plot function:
    # TITLE:  character string that is plotted over the graph
    # FNAMES: comma-separated list of file names (each file contains one date series,
    #         e.g. data for one flow). The format of each file is CSV-style, but the
    #         separator does not have to be a comma (can be set with SEP). The first
    #         column contains the timestamps. The second, third etc. columns contain
    #         data, but only one of these columns will be plotted (set with YINDEX).
    # LNAMES: comma-separated list of legend names. this list has the same length
    #         as FNAMES and each entry corresponds to data in file name with the
    #         same index in FNAMES
    # YLAB:   y-axis label character string
    # YINDEX: index of data column in file to plot on y-axis (file can have more than
    #         one data column)
    # YSCALER: factor which is multiplied with each data value before plotting
    # SEP:    column separator used in data file
    # OTYPE:  type of output graph (default is 'pdf')
    # OPREFIX: the prefix (first part) of the graph file name
    # ODIR:   directory where output files, e.g. pdfs are placed
    # AGGR:   set to '1' means data is aggregated over time intervals, more specifically
    #         the data is summed over the time intervals (used to determine throughput
    #         over time windows based on packet lengths)
    #         set to '0' means plot data as is
    # OMIT_CONST: '0' don't omit anything,
    #             '1' omit any data series from plot that are 100% constant
    # YMIN:   minimum value on y-axis (for zooming in), default is 0
    # YMAX:   maximum value on y-axis (for zooming in), default is 0 meaning the
    #         maximum value is determined from the data
    # STIME:  start time on x-axis (for zooming in), default is 0.0 meaning the start
    #         of an experiment
    # ETIME:  end time on x-axis (for zooming in), default is 0.0 meaning the end of an
    #         experiment a determined from the data
    # GROUPS: comma-separated list of group IDs (integer numbers). This list has
    #         the same length as FNAMES. If data from different experiments is plotted,
    #         each experiment will be assigned a different number and these are passed
    #         via GROUPS. This allows the plotting function to determine which data
    #         series are (or are not) from the same experiment, so that results
    #         from different experiments, that started at different times, can be
    #         plotted in the same graph.
    # BOXPL:  '0' plot each point on time axis
    #         '1' plot a boxplot over all data points from all data seres for each
    #         distinct timestamp (instead of a point for each a data series)
    #local('which R')
    local('TITLE="%s" FNAMES="%s" LNAMES="%s" YLAB="%s" YINDEX="%d" YSCALER="%f" '
          'SEP="%s" OTYPE="%s" OPREFIX="%s" ODIR="%s" AGGR="%s" OMIT_CONST="%s" '
          'YMIN="%s" YMAX="%s" STIME="%s" ETIME="%s" GROUPS="%s" BOXPL="%s" %s '
          '%s %s%s_plot_time_series.Rout' %
          (title, ','.join(file_names), ','.join(leg_names), ylab, yindex, yscaler,
           sep, otype, oprefix, pdf_dir, aggr, omit_const, ymin, ymax, stime, etime,
           ','.join(map(str, _groups)), boxplot, plot_params,
           plot_script, pdf_dir, oprefix))
    # clean up the R log unless we're debugging
    if config.TPCONF_debug_level == 0:
        local('rm -f %s%s_plot_time_series.Rout' % (pdf_dir, oprefix))
## Plot DASH goodput
# @param title Title of plot at the top
# @param files Dictionary with legend names (keys) and files with the data to plot
# (values)
# @param groups Map data files to groups (all files of same experiment must have
# same group number)
# @param ylab Label for y-axis
# @param otype Type of output file
# @param oprefix Output file name prefix
# @param pdf_dir Output directory for graphs
# @param sep Character that separates columns in data file
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds (by default 0.0 = end of
# experiment)
# @param plot_params Parameters passed to plot function via environment variables
# @param plot_script Specify the script used for plotting, must specify full path
# (default is config.TPCONF_script_path/plot_dash_goodput.R)
def plot_dash_goodput(title='', files={}, groups={}, ylab='', otype='', oprefix='',
                      pdf_dir='', sep=' ', ymin=0, ymax=0, lnames='', stime='0.0',
                      etime='0.0', plot_params='', plot_script=''):
    """Plot DASH goodput by invoking an external R plot script.

    Sorts the data files, validates the legend names, resolves the output
    directory and shells out to plot_dash_goodput.R, passing all plot
    settings via environment variables (documented in the comment block
    before the local() call).
    """
    file_names = []
    leg_names = []
    sorted_files = sorted(files.items())
    sorted_files = sort_by_group_id(sorted_files, groups)
    #print(sorted_files)
    for name, file_name in sorted_files:
        leg_names.append(name)
        file_names.append(file_name)
    if lnames != '':
        lname_arr = lnames.split(';')
        if len(lname_arr) != len(leg_names):
            abort(
                'Number of legend names must be the same as the number of flows')
        else:
            leg_names = lname_arr
    # get the directory name here if not specified
    if pdf_dir == '':
        pdf_dir = os.path.dirname(file_names[0]) + '/'
    else:
        pdf_dir = valid_dir(pdf_dir)
        # if not absolute dir, make it relative to experiment_dir
        # assume experiment dir is part before first slash
        # BUGFIX: was "if pdf_dir != '/':", which wrongly prefixed absolute
        # paths such as '/tmp/out/' with the experiment directory; check the
        # first character instead, consistent with plot_time_series() and
        # plot_incast_ACK_series()
        if pdf_dir[0] != '/':
            pdf_dir = file_names[0].split('/')[0] + '/' + pdf_dir
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)
    if plot_script == '':
        plot_script = 'R CMD BATCH --vanilla %s/plot_dash_goodput.R' % \
                      config.TPCONF_script_path
    # interface between this code and the plot function are environment variables
    # the following variables are passed to plot function:
    # TITLE:  character string that is plotted over the graph
    # FNAMES: comma-separated list of file names (each file contains one date series,
    #         e.g. data for one flow). The format of each file is CSV-style, but the
    #         separator does not have to be a comma (can be set with SEP). The first
    #         column contains the timestamps. The second, third etc. columns contain
    #         data, but only one of these columns will be plotted (set with YINDEX).
    # LNAMES: comma-separated list of legend names. this list has the same length
    #         as FNAMES and each entry corresponds to data in file name with the
    #         same index in FNAMES
    # YLAB:   y-axis label character string
    # SEP:    column separator used in data file
    # OTYPE:  type of output graph (default is 'pdf')
    # OPREFIX: the prefix (first part) of the graph file name
    # ODIR:   directory where output files, e.g. pdfs are placed
    # YMIN:   minimum value on y-axis (for zooming in), default is 0
    # YMAX:   maximum value on y-axis (for zooming in), default is 0 meaning the
    #         maximum value is determined from the data
    # STIME:  start time on x-axis (for zooming in), default is 0.0 meaning the start
    #         of an experiment
    # ETIME:  end time on x-axis (for zooming in), default is 0.0 meaning the end of an
    #         experiment a determined from the data
    #local('which R')
    local('TITLE="%s" FNAMES="%s" LNAMES="%s" YLAB="%s" SEP="%s" OTYPE="%s" '
          'OPREFIX="%s" ODIR="%s" YMIN="%s" YMAX="%s" STIME="%s" ETIME="%s" %s '
          '%s %s%s_plot_dash_goodput.Rout' %
          (title, ','.join(file_names), ','.join(leg_names), ylab, sep, otype, oprefix,
           pdf_dir, ymin, ymax, stime, etime, plot_params, plot_script,
           pdf_dir, oprefix))
    # clean up the R log unless we're debugging
    if config.TPCONF_debug_level == 0:
        local('rm -f %s%s_plot_dash_goodput.Rout' % (pdf_dir, oprefix))
## plot_incast_ACK_series
## (based on plot_time_series, but massages the filenames and legend names a little
## differently to handle a trial being broken into 'bursts'.)
# @param title Title of plot at the top
# @param files Dictionary with legend names (keys) and files with the data
# to plot (values)
# @param ylab Label for y-axis
# @param yindex Index of the column in data file to plot
# @param yscaler Scaler for y-values (data in file is multiplied with the scaler)
# @param otype Type of output file
# @param oprefix Output file name prefix
# @param pdf_dir Output directory for graphs
# @param sep Character that separates columns in data file
# @param aggr Aggregation of data in 1-seond intervals
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds
# (by default 0.0 = end of experiment)
# @param groups Map data files to groups (all files of same experiment must have
# same group number)
# @param sort_flowkey '1' sort by flow key (default)
# '0' don't sort by flow key
# @param burst_sep '0' plot seq numbers as they come, relative to 1st seq number
# > '0' plot seq numbers relative to 1st seq number after gaps
# of more than burst_sep seconds (e.g. incast query/response bursts)
# < 0, plot seq numbers relative to 1st seq number after each abs(burst_sep)
# seconds since the first burst @ t = 0 (e.g. incast query/response bursts)
# @param sburst Default 1, or a larger integer indicating the burst number of the first burst
# in the provided list of filenames. Used as an offset to calculate new legend suffixes.
# @param plot_params Parameters passed to plot function via environment variables
# @param plot_script Specify the script used for plotting, must specify full path
# (default is config.TPCONF_script_path/plot_bursts.R)
# @param source_filter Source filter
def plot_incast_ACK_series(title='', files={}, ylab='', yindex=2, yscaler=1.0, otype='',
                           oprefix='', pdf_dir='', sep=' ', aggr='', omit_const='0',
                           ymin=0, ymax=0, lnames='', stime='0.0', etime='0.0',
                           groups={}, sort_flowkey='1', burst_sep='1.0', sburst=1,
                           plot_params='', plot_script='', source_filter=''):
    """Plot per-burst time series (e.g. incast ACK sequence numbers).

    Based on plot_time_series(), but each entry in *files* maps a flow
    name to a *list* of per-burst files, so legend names are given a
    '%<burst_no>' suffix and the R script invoked is plot_bursts.R.
    """
    file_names = []
    leg_names = []
    _groups = []
    # Pick up case where the user has supplied a number of legend names
    # that doesn't match the number of distinct trials (as opposed to the
    # number of bursts detected within each trial)
    if lnames != '':
        if len(lnames.split(";")) != len(files.keys()) :
            abort(
                'Number of legend names must be the same as the number of flows')
    if sort_flowkey == '1':
        sorted_files = sort_by_flowkeys(files, source_filter)
    else:
        sorted_files = files.items()
    #print("MAIN: sorted_files: %s" % sorted_files)
    # sort by group id
    sorted_files = sort_by_group_id2(sorted_files, groups)
    for name, file_name in sorted_files:
        # Create a sequence of burst-specific legend names,
        # derived from the flowID-based legend name.
        # Keep the .R code happy by creating a groups entry
        # for each burst-specific file.
        for burst_index in range(len(file_name)) :
            leg_names.append(name+"%"+str(burst_index+sburst))
            file_names.append(file_name[burst_index])
            _groups.append(groups[file_name[burst_index]])
    if lnames != '':
        # Create a sequence of burst-specific legend names,
        # derived from the per-trial legend names provided by user.
        lname_arr_orig = lnames.split(';')
        lname_arr = []
        i = 0
        for name, file_name in sorted_files:
            for burst_index in range(len(file_name)) :
                lname_arr.append(lname_arr_orig[i]+"%"+str(burst_index+sburst))
            i += 1
        if len(lname_arr) != len(leg_names):
            abort(
                'Number of legend names must be the same as the number of flows')
        else:
            leg_names = lname_arr
    # get the directory name here if not specified
    if pdf_dir == '':
        pdf_dir = os.path.dirname(file_names[0]) + '/'
    else:
        pdf_dir = valid_dir(pdf_dir)
        # if no absolute path make it relative to experiment_dir
        # assume experiment dir is part before first slash
        if pdf_dir[0] != '/':
            pdf_dir = file_names[0].split('/')[0] + '/' + pdf_dir
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)
    if plot_script == '':
        plot_script = 'R CMD BATCH --vanilla %s/plot_bursts.R' % \
                      config.TPCONF_script_path
    #local('which R')
    # NOTE(review): BURST_SEP is hard-coded to 1 in the environment below
    # and the burst_sep parameter is not forwarded — confirm intentional.
    local('TITLE="%s" FNAMES="%s" LNAMES="%s" YLAB="%s" YINDEX="%d" YSCALER="%f" '
          'SEP="%s" OTYPE="%s" OPREFIX="%s" ODIR="%s" AGGR="%s" OMIT_CONST="%s" '
          'YMIN="%s" YMAX="%s" STIME="%s" ETIME="%s" GROUPS="%s" %s '
          'BURST_SEP=1 '
          '%s %s%s_plot_bursts.Rout' %
          (title, ','.join(file_names), ','.join(leg_names), ylab, yindex, yscaler,
           sep, otype, oprefix, pdf_dir, aggr, omit_const, ymin, ymax, stime, etime,
           ','.join(map(str, _groups)), plot_params, plot_script, pdf_dir, oprefix))
    # clean up the R log unless we're debugging
    if config.TPCONF_debug_level == 0:
        local('rm -f %s%s_plot_bursts.Rout' % (pdf_dir, oprefix))
###################################################################################
# Helper functions for extract and plot functions
###################################################################################
## Get graph output file name
# @param test_id_arr List of test IDs
# @param out_name Output file name prefix
# @return Output file name
def get_out_name(test_id_arr=[], out_name=''):
    """Build the graph output name from the first test ID.

    Appends '_comparison' when several test IDs are plotted together and
    prefixes the result with *out_name* when one was supplied.
    """
    base = test_id_arr[0]
    if len(test_id_arr) > 1:
        base += '_comparison'
    if out_name != '':
        return out_name + '_' + base
    return base
## Check number of data rows and include file if over minimum
# @param fname Data file name
# @param min_values Minimum number of values required
# @return True if file has more than minimum rows, False otherwise
def enough_rows(fname='', min_values='3'):
    """Return True if the data file has more than min_values rows.

    Reading stops as soon as the threshold is exceeded, so large files are
    not scanned to the end.
    """
    threshold = int(min_values)
    count = 0
    with open(fname, 'r') as data:
        for _ in data:
            count += 1
            if count > threshold:
                return True
    return False
## Filter out data files with fewer than min_values data points
# @param files File names indexed by flow names
# @param groups Group ids indexed by file names
# @param min_values Minimum number of values required
# @return Filtered file names and groups
def filter_min_values(files={}, groups={}, min_values='3'):
    """Drop data files that contain fewer than min_values data points.

    Values in *files* may be single file names or lists of file names (the
    ackseq extraction maps one dataset name to several burst files); group
    mappings are carried over for every file that survives the filter.
    """
    kept_files = {}
    kept_groups = {}
    for name, entry in files.items():
        if isinstance(entry, list):
            # one dataset name -> several burst files: keep each file that
            # is big enough, preserving the list structure
            for single in entry:
                if enough_rows(single, min_values):
                    kept_files.setdefault(name, []).append(single)
                    kept_groups[single] = groups[single]
        elif enough_rows(entry, min_values):
            kept_files[name] = entry
            kept_groups[entry] = groups[entry]
    return (kept_files, kept_groups)
## Extract data per incast burst
# @param data_file File with data (lines of "<timestamp> <statistic>")
# @param burst_sep Time between bursts (0.0 means no burst separation)
# @param normalize 0: leave metric values as they are (default)
#                  1: normalise metric values on first value or first value
#                     of each burst (if burst_sep > 0.0)
# @return List of file names (one file per burst; source file + ".0" when
#         there is no burst separation, ".1", ".2", ... ".N" otherwise)
def extract_bursts(data_file='', burst_sep=0.0, normalize=0):

    # new filenames (source file + ".0" or ".1,.2,....N" for bursts)
    new_fnames = []

    # internal state
    burstN = 1       # number of current burst (1-based)
    firstTS = -1     # timestamp of first statistic of current burst
    prev_data = -1   # last data value seen

    try:
        # first read the entire contents of the data file
        with open(data_file) as f:
            lines = f.readlines()

        if burst_sep != 0:
            # create the first .N output file
            out_f = open(data_file + "." + "1", "w")
            new_fnames.append(data_file + "." + "1")
        else:
            out_f = open(data_file + "." + "0", "w")
            new_fnames.append(data_file + "." + "0")

        # now walk through every line of the data file
        for oneline in lines:
            # fields[0] is the timestamp, fields[1] is the statistic
            fields = oneline.split()

            if firstTS == -1:
                # first time through the loop, so set some baseline
                # values for later offsets
                firstTS = fields[0]
                prevTS = firstTS
                if normalize == 1:
                    first_data = fields[1]
                else:
                    first_data = '0.0'

            # if burst_sep == 0 the only thing we're calculating is a
            # cumulative running total, so we only do burst
            # identification if burst_sep != 0
            if burst_sep != 0:
                if burst_sep < 0:
                    # gap is time since first statistic of this burst
                    # (i.e. relative to firstTS)
                    gap = float(fields[0]) - float(firstTS)
                else:
                    # gap is time since the previous statistic
                    gap = float(fields[0]) - float(prevTS)

                # new burst begins when time between this statistic and
                # previous exceeds abs(burst_sep)
                if (gap >= abs(burst_sep)):
                    # we've found the first one of the _next_ burst
                    # close previous burst output file
                    out_f.close()

                    # move on to the next burst
                    burstN += 1

                    print("Burst: %3i, ends at %f sec, data: %f bytes, gap: %3.6f sec" %
                          ((burstN - 1), float(prevTS), float(prev_data) - float(first_data), gap))

                    # reset firstTS to the beginning (first timestamp) of this new burst
                    firstTS = fields[0]

                    # first data value of next burst must be considered relative to the last
                    # data value of the previous burst if we normalize
                    if normalize == 1:
                        first_data = prev_data

                    # create the next .N output file
                    out_f = open(data_file + "." + str(burstN), "w")
                    new_fnames.append(data_file + "." + str(burstN))

            # data value (potentially normalised based on first value / first
            # value of burst)
            data_gap = float(fields[1]) - float(first_data)

            # write to burst-specific output file: <time> <data>
            out_f.write(fields[0] + " " + str(data_gap) + "\n")

            # store the data value and timestamp for next time around the loop
            prev_data = fields[1]
            prevTS = fields[0]

        # close the last output file
        out_f.close()

    except IOError:
        # bug fix: message previously said 'extract_ursts()'
        print('extract_bursts(): File access problem while working on %s' % data_file)

    return new_fnames
## Select bursts to plot and add files to out_files and out_groups
# @param name Flow name
# @param group Flow group
# @param data_file Data file for flow
# @param burst_sep Time between bursts in seconds (0.0 = no burst split)
# @param sburst First burst in output (bursts are numbered from 1)
# @param eburst Last burst in output (0 = up to the last available burst)
# @param out_files Map of flow names to file names (created if not supplied)
# @param out_groups Map of file names to group numbers (created if not supplied)
# @return Updated file and group lists (with burst file data)
def select_bursts(name='', group='', data_file='', burst_sep='0.0', sburst='1', eburst='0',
                  out_files=None, out_groups=None):

    # bug fix: the previous mutable default arguments ({}) were mutated and
    # returned, so state leaked between calls that relied on the defaults
    if out_files is None:
        out_files = {}
    if out_groups is None:
        out_groups = {}

    burst_sep = float(burst_sep)
    sburst = int(sburst)
    eburst = int(eburst)

    # do the burst extraction here,
    # return a new vector of one or more filenames, pointing to file(s) containing
    # <time> <statistic>
    out_burst_files = extract_bursts(data_file=data_file, burst_sep=burst_sep)

    # Incorporate the extracted .N files
    # as a new, expanded set of filenames to be plotted.
    # Update the out_files dictionary (key=interim legend name based on flow, value=file)
    # and out_groups dictionary (key=file name, value=group)
    if burst_sep == 0.0:
        # Assume this is a single plot (not broken into bursts)
        # The plot_time_series() function expects key to have a single string
        # value rather than a vector. Take the first (and presumably only)
        # entry in the vector returned by extract_bursts()
        out_files[name] = out_burst_files[0]
        out_groups[out_burst_files[0]] = group
    else:
        # This trial has been broken into one or more bursts.
        # plot_incast_ACK_series() knows how to parse a key having a
        # 'vector of strings' value.
        # Also filter the selection based on sburst/eburst nominated by user
        if eburst == 0:
            eburst = len(out_burst_files)
        # Catch case when eburst was set non-zero but also > number of actual bursts
        eburst = min(eburst, len(out_burst_files))

        if sburst <= 0:
            sburst = 1
        # Catch case where sburst set greater than eburst
        if sburst > eburst:
            sburst = eburst

        out_files[name] = out_burst_files[sburst - 1:eburst]
        for tmp_f in out_burst_files[sburst - 1:eburst]:
            out_groups[tmp_f] = group

    return (out_files, out_groups)
## Merge several data files into one data file
# @param in_files List of file names
# @return List with the single merged file name
def merge_data_files(in_files):
    # resulting file name will be the first file name with the flow tuple replaced by
    # 0.0.0.0_0_0.0.0.0_0 indicating a merged file
    # (raw string avoids the deprecated non-raw backslash escapes)
    merge_fname = re.sub(r'_[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*_[0-9]*_[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*_[0-9]*',
                         '_0.0.0.0_0_0.0.0.0_0', in_files[0])
    merge_fname += '.all'

    # concatenate all input files in sorted order; the with-statement closes
    # the output file even if reading one of the inputs fails
    with open(merge_fname, 'w') as f_out:
        for fname in sorted(in_files):
            with open(fname) as f:
                f_out.writelines(f.readlines())

    return [merge_fname]
## global list of participating hosts for each experiment
# maps test id -> list of host names discovered from that experiment's logs
part_hosts = {}

## Get list of hosts that participated in experiment
# @param test_id Experiment id
# @return List of host names (cached in part_hosts per test id)
def get_part_hosts(test_id):
    global part_hosts

    if test_id not in part_hosts:
        part_hosts[test_id] = []

        # scan the per-host uname log files; each participating host has
        # exactly one <test_id>_<host>_uname.log.gz file
        uname_files = get_testid_file_list('', test_id,
                                           'uname.log.gz', '')
        for f in uname_files:
            # pull the host name out of the file name
            res = re.search('.*_(.*)_uname.log.gz', f)
            if res:
                part_hosts[test_id].append(res.group(1))

    return part_hosts[test_id]
## map test IDs or directory names to TPCONF_host_internal_ip structures
# (value None means: no archived config found, use the old config.py approach)
host_internal_ip_cache = {}
## map test IDs or directory names to list of hosts (TPCONF_router + TPCONF_hosts)
host_list_cache = {}

## Get external and internal address for analysis functions
# @param test_id Experiment id
# @param host Internal or external address
# @param do_abort '0' do not abort if no external address found, '1' abort if no
#                 external address found
# @return Pair of external address and internal address, or pair of empty strings
#         if host not part of experiment
def get_address_pair_analysis(test_id, host, do_abort='1'):
    global host_internal_ip_cache
    global host_list_cache
    internal = ''
    external = ''

    # temporary file name used when unpacking the archived old config
    TMP_CONF_FILE = '___oldconfig.py'

    # XXX the whole old config access should be moved into separate module as
    # similar code is also in clockoffset

    # prior to TEACUP version 0.9 it was required to run the analysis with a config
    # file that had config.TPCONF_host_internal_ip as it was used to run the experiment
    # (or a superset of it). Since version 0.9 we use config.TPCONF_host_internal_ip
    # (as well as config.TPCONF_hosts and config.TPCONF_router) from the file
    # <test_id_prefix>_tpconf_vars.log.gz in the test experiment directory.

    if test_id not in host_internal_ip_cache:
        # first find the directory by looking for the mandatory uname file
        uname_file = get_testid_file_list('', test_id,
                                          'uname.log.gz', '')
        dir_name = os.path.dirname(uname_file[0])

        if dir_name in host_internal_ip_cache:
            # create test id cache entry from directory entry
            host_internal_ip_cache[test_id] = host_internal_ip_cache[dir_name]
            if host_internal_ip_cache[test_id] != None:
                host_list_cache[test_id] = host_list_cache[dir_name]
        else:
            # try to find old config information
            # look for tpconf_vars.log.gz file in that directory
            var_file = local('find -L %s -name "*tpconf_vars.log.gz"' % dir_name,
                             capture=True)

            if len(var_file) > 0:
                # new approach without using config.py

                # unzip archived file
                local('gzip -cd %s > %s' % (var_file, TMP_CONF_FILE))

                # load the TPCONF_variables into oldconfig
                oldconfig = imp.load_source('oldconfig', TMP_CONF_FILE)

                # remove temporary unzipped file
                try:
                    os.remove(TMP_CONF_FILE)
                    os.remove(TMP_CONF_FILE + 'c')  # remove the compiled file as well
                except OSError:
                    pass

                # store data in cache (both under test id and directory name)
                host_internal_ip_cache[test_id] = oldconfig.TPCONF_host_internal_ip
                host_list_cache[test_id] = oldconfig.TPCONF_hosts + oldconfig.TPCONF_router
                host_internal_ip_cache[dir_name] = oldconfig.TPCONF_host_internal_ip
                host_list_cache[dir_name] = oldconfig.TPCONF_hosts + oldconfig.TPCONF_router
            else:
                # old approach using the functions in hostint.py that access config.py
                # store empty value in cache (both under test id and directory name)
                host_internal_ip_cache[test_id] = None
                host_internal_ip_cache[dir_name] = None

    if host_internal_ip_cache[test_id] != None:
        # new approach
        # pretend it is an external name and perform lookup
        internal = host_internal_ip_cache[test_id].get(host, [])
        if len(internal) == 0:
            # host is internal name, so need to find external name
            internal = host
            for e, i in host_internal_ip_cache[test_id].items():
                if i[0] == host:
                    external = e
        else:
            # host is external name
            internal = internal[0]
            external = host

        hosts = host_list_cache[test_id]
    else:
        # old approach
        (external, internal) = get_address_pair(host, do_abort)
        hosts = get_part_hosts(test_id)

    if external not in hosts:
        # host did not participate in this experiment
        return ('', '')
    else:
        return (external, internal)
###################################################################################
# Main extract and plot functions
###################################################################################
## Extract DASH goodput data from httperf log files
## The extracted files have an extension of .dashgp. The format is CSV with the
## columns:
## 1. Timestamp of request (second.microsecond)
## 2. Size of requested/downloaded block (bytes)
## 3. Byte rate (mbps), equivalent to size divided by response time times 8
## 4. Response time (seconds)
## 5. Nominal/defined cycle length (seconds)
## 6. Nominal/defined rate (kbps)
## 7. Block number
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only If '1' don't extract already extracted data
#                    if '0' extract data (default)
# @param dash_log_list File name with a list of dash logs
# @param ts_correct If '0' use timestamps as they are (default)
#                   if '1' correct timestamps based on clock offsets estimated
#                   from broadcast pings
# @return Test ID list, map of flow names to interim data file names, map of files
#         and group ids
def _extract_dash_goodput(test_id='', out_dir='', replot_only='0', dash_log_list='',
                          ts_correct='1'):
    "Extract DASH goodput from httperf logs"

    # extension of input data files
    ifile_ext = '_httperf_dash.log.gz'
    # extension of output data files
    ofile_ext = '.dashgp'

    # files with extracted data
    out_files = {}
    # group ids (map each file to an experiment)
    out_groups = {}
    # input dash log files
    dash_files = []

    test_id_arr = test_id.split(';')
    dash_files = get_testid_file_list(dash_log_list, test_id,
                                      ifile_ext, '')

    for dash_file in dash_files:
        # set and create result directory if necessary
        out_dirname = get_out_dir(dash_file, out_dir)

        dash_file = dash_file.strip()
        name = os.path.basename(dash_file.replace(ifile_ext, ''))
        out = out_dirname + name + ofile_ext

        # this extracts the req time, request size, byte rate, response time,
        # nominal cycle length, nominal rate in kbps and block number
        # (requires modified httperf output)
        # the sed here parses the nominal cycle length, nominal rate in kbps
        # and block number from the file name
        if replot_only == '0' or not os.path.isfile(out):
            local(
                'zcat %s | grep video_files | grep -v NA | '
                'awk \'{ print $1 "," $5 "," $7 "," $10 "," $14 }\' | '
                'sed "s/\/video_files-\([0-9]*\)-\([0-9]*\)\/\([0-9]*\)/\\1,\\2,\\3/" > %s' %
                (dash_file, out))

        # parse the host name out of the log file name
        host = local(
            'echo %s | sed "s/.*_\([a-z0-9\.]*\)_[0-9]*%s/\\1/"' %
            (dash_file, ifile_ext), capture=True)

        # NOTE(review): this rebinds the function parameter 'test_id' on each
        # iteration with the id parsed from the current file name; the group
        # lookup below relies on this per-file id
        test_id = local(
            'echo %s | sed "s/.*\/\(.*\)_%s_.*/\\1/"' %
            (dash_file, host), capture=True)

        if ts_correct == '1':
            out = adjust_timestamps(test_id, out, host, ',', out_dir)

        if dash_log_list != '':
            # need to build test_id_arr
            if test_id not in test_id_arr:
                test_id_arr.append(test_id)
        # else test_id_arr has the list of test ids

        # group number is just the index in the list plus one (start with 1)
        group = test_id_arr.index(test_id) + 1

        out_files[name] = out
        out_groups[out] = group

    return (test_id_arr, out_files, out_groups)
## Extract DASH goodput data from httperf log files (TASK)
## SEE _extract_dash_goodput()
@task
def extract_dash_goodput(test_id='', out_dir='', replot_only='0', dash_log_list='',
                         out_name='', ts_correct='1'):
    "Extract DASH goodput from httperf logs"

    # delegate all work to the internal implementation (out_name is not
    # used for extraction, only by the plotting task)
    _extract_dash_goodput(test_id=test_id, out_dir=out_dir,
                          replot_only=replot_only,
                          dash_log_list=dash_log_list,
                          ts_correct=ts_correct)

    # done
    puts('\n[MAIN] COMPLETED extracting DASH goodput %s \n' % test_id)
## Plot DASH goodput from httperf log files
# @param test_id Test IDs of experiments to analyse (ignored if dash_log_list
#                is specified)
# @param out_dir Output directory for results
# @param replot_only Don't extract data again, just redo the plot
# @param dash_log_list File name with a list of dash logs
# @param lnames Semicolon-separated list of legend names
# @param out_name Name prefix for resulting pdf file
# @param pdf_dir Output directory for pdf files (graphs),
#                if not specified it is the same as out_dir
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param stime Start time of plot window in seconds
#              (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds (by default 0.0 = end of
#              experiment)
# @param ts_correct '0' use timestamps as they are (default)
#                   '1' correct timestamps based on clock offsets estimated
#                   from broadcast pings
# @param plot_params Parameters passed to plot function via environment variables
# @param plot_script Specify the script used for plotting, must specify full path
@task
def analyse_dash_goodput(test_id='', out_dir='', replot_only='0', dash_log_list='',
                         lnames='', out_name='', pdf_dir='', ymin=0, ymax=0,
                         stime='0.0', etime='0.0', ts_correct='1', plot_params='',
                         plot_script=''):
    "Plot DASH goodput from httperf logs"

    # get list of test_ids and data files for plot
    (test_id_arr,
     out_files,
     out_groups) = _extract_dash_goodput(test_id, out_dir, replot_only, dash_log_list,
                                         ts_correct)

    # set output file name and plot title
    # NOTE(review): this reset discards any caller-supplied out_name prefix,
    # so the out_name task parameter is effectively ignored -- confirm intended
    out_name = ''
    title = ''
    if dash_log_list != '':
        # NOTE(review): dash_log_list is a string here, while get_out_name()
        # indexes its first argument like a list of test ids (a multi-char
        # string takes the comparison branch using only its first character)
        # -- verify against get_out_name()
        out_name = get_out_name(dash_log_list, out_name)
        title = dash_log_list
    else:
        out_name = get_out_name(test_id_arr, out_name)
        title = test_id_arr[0]

    # call plot function
    plot_dash_goodput(
        title,
        out_files,
        out_groups,
        'Transferred (MB)',
        'pdf',
        out_name +
        '_dashgp',
        pdf_dir=pdf_dir,
        sep=',',
        ymin=float(ymin),
        ymax=float(ymax),
        lnames=lnames,
        stime=float(stime),
        etime=float(etime),
        plot_params=plot_params,
        plot_script=plot_script)

    # done
    puts('\n[MAIN] COMPLETED plotting DASH goodput %s \n' % out_name)
## Extract RTT for flows using SPP
## The extracted files have an extension of .rtts. The format is CSV with the
## columns:
## 1. Timestamp RTT measured (seconds.microseconds)
## 2. RTT (seconds)
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again that is already extracted
# @param source_filter Filter on specific sources
# @param udp_map Map that defines unidirectional UDP flows to combine. Format:
#                <ip1>,<port1>:<ip2>,<port2>[;<ip3>,<port3>:<ip4>,<port4>]*
# @param ts_correct '0' use timestamps as they are (default)
#                   '1' correct timestamps based on clock offsets estimated
#                   from broadcast pings
# @param burst_sep '0' plot seq numbers as they come, relative to 1st seq number
#                  > '0' plot seq numbers relative to 1st seq number after gaps
#                  of more than burst_sep milliseconds (e.g. incast query/response bursts)
#                  < 0, plot seq numbers relative to 1st seq number after each abs(burst_sep)
#                  seconds since the first burst @ t = 0 (e.g. incast query/response bursts)
# @param sburst Start plotting with burst N (bursts are numbered from 1)
# @param eburst End plotting with burst N (bursts are numbered from 1)
# @return Test ID list, map of flow names to interim data file names and
#         map of file names and group IDs
def _extract_rtt(test_id='', out_dir='', replot_only='0', source_filter='',
                 udp_map='', ts_correct='1', burst_sep='0.0', sburst='1', eburst='0'):
    "Extract RTT of flows with SPP"

    ifile_ext = '.dmp.gz'
    ofile_ext = '.rtts'

    # flows already processed (avoid doing both directions twice)
    already_done = {}
    out_files = {}
    out_groups = {}
    # maps one endpoint of a unidirectional UDP flow to its peer (both ways)
    udp_reverse_map = {}

    test_id_arr = test_id.split(';')
    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    #local('which spp')

    if udp_map != '':
        entries = udp_map.split(';')
        for entry in entries:
            # need to add forward and reverse mapping
            k, v = entry.split(':')
            udp_reverse_map[k] = v
            udp_reverse_map[v] = k

    group = 1
    for test_id in test_id_arr:

        # first process tcpdump files (ignore router and ctl interface tcpdumps)
        tcpdump_files = get_testid_file_list('', test_id,
                                             ifile_ext,
                                             'grep -v "router.dmp.gz" | grep -v "ctl.dmp.gz"')

        for tcpdump_file in tcpdump_files:
            # get input directory name and create result directory if necessary
            out_dirname = get_out_dir(tcpdump_file, out_dir)
            dir_name = os.path.dirname(tcpdump_file)

            # get unique flows (cached, since tcpdump parsing is expensive)
            flows = lookup_flow_cache(tcpdump_file)
            if flows == None:
                flows = _list(local('zcat %s | tcpdump -nr - "tcp" | '
                                    'awk \'{ if ( $2 == "IP" ) { print $3 " " $5 " tcp" } }\' | '
                                    'sed "s/://" | '
                                    'sed "s/\.\([0-9]*\) /,\\1 /g" | sed "s/ /,/g" | '
                                    'LC_ALL=C sort -u' %
                                    tcpdump_file, capture=True))
                flows += _list(local('zcat %s | tcpdump -nr - "udp" | '
                                     'awk \'{ if ( $2 == "IP" ) { print $3 " " $5 " udp" } }\' | '
                                     'sed "s/://" | '
                                     'sed "s/\.\([0-9]*\) /,\\1 /g" | sed "s/ /,/g" | '
                                     'LC_ALL=C sort -u' %
                                     tcpdump_file, capture=True))

                append_flow_cache(tcpdump_file, flows)

            # since client sends first packet to server, client-to-server flows
            # will always be first

            for flow in flows:
                src, src_port, dst, dst_port, proto = flow.split(',')

                # get external and internal addresses
                src, src_internal = get_address_pair_analysis(test_id, src, do_abort='0')
                dst, dst_internal = get_address_pair_analysis(test_id, dst, do_abort='0')

                if src == '' or dst == '':
                    continue

                # flow name
                name = src_internal + '_' + src_port + \
                    '_' + dst_internal + '_' + dst_port
                rev_name = dst_internal + '_' + dst_port + \
                    '_' + src_internal + '_' + src_port
                # test id plus flow name
                if len(test_id_arr) > 1:
                    long_name = test_id + '_' + name
                    long_rev_name = test_id + '_' + rev_name
                else:
                    long_name = name
                    long_rev_name = rev_name

                if long_name not in already_done and long_rev_name not in already_done:

                    # the two dump files
                    dump1 = dir_name + '/' + test_id + '_' + src + ifile_ext
                    dump2 = dir_name + '/' + test_id + '_' + dst + ifile_ext

                    # control the fields used by spp for generating the packet
                    # ids (hashes)
                    if proto == 'udp':
                        pid_fields = 2111
                    else:
                        pid_fields = 511

                    if proto == 'tcp':
                        filter1 = '(src host ' + src_internal + ' && src port ' + src_port + \
                                  ') || (' + \
                                  'dst host ' + src_internal + ' && dst port ' + src_port + ')'
                        filter2 = filter1
                    else:
                        # for UDP, combine the two unidirectional flows given
                        # in udp_map into one bidirectional flow
                        entry = udp_reverse_map.get(
                            src_internal + ',' + src_port, '')
                        if entry != '':
                            src2_internal, src2_port = entry.split(',')
                            name = src_internal + '_' + src_port + \
                                '_' + src2_internal + '_' + src2_port
                            rev_name = src2_internal + '_' + src2_port + \
                                '_' + src_internal + '_' + src_port
                            filter1 = '(src host ' + src_internal + ' && src port ' + src_port + \
                                      ') || ( ' + \
                                      'src host ' + src2_internal + ' && src port ' + src2_port + ')'
                            filter2 = filter1
                            if rev_name in out_files:
                                continue
                        else:
                            warn('No entry in udp_map for %s:%s' % (src_internal, src_port))
                            continue

                    out1 = out_dirname + test_id + \
                        '_' + src + '_filtered_' + name + '_ref.dmp'
                    out2 = out_dirname + test_id + \
                        '_' + dst + '_filtered_' + name + '_mon.dmp'
                    out_rtt = out_dirname + test_id + '_' + name + ofile_ext
                    rev_out_rtt = out_dirname + test_id + '_' + rev_name + ofile_ext

                    if replot_only == '0' or not ( os.path.isfile(out_rtt) and \
                                                   os.path.isfile(rev_out_rtt) ):
                        # create filtered tcpdumps
                        local(
                            'zcat %s | tcpdump -nr - -w %s "%s"' %
                            (dump1, out1, filter1))
                        local(
                            'zcat %s | tcpdump -nr - -w %s "%s"' %
                            (dump2, out2, filter2))

                        # compute rtts with spp (both directions)
                        local(
                            'spp -# %s -a %s -f %s -A %s -F %s > %s' %
                            (pid_fields, src_internal, out1, dst_internal, out2, out_rtt))
                        local(
                            'spp -# %s -a %s -f %s -A %s -F %s > %s' %
                            (pid_fields,
                             dst_internal,
                             out2,
                             src_internal,
                             out1,
                             rev_out_rtt))

                        # remove filtered tcpdumps
                        local('rm -f %s %s' % (out1, out2))

                    already_done[long_name] = 1
                    already_done[long_rev_name] = 1

                    if sfil.is_in(name):
                        if ts_correct == '1':
                            out_rtt = adjust_timestamps(test_id, out_rtt, src, ' ', out_dir)
                        (out_files,
                         out_groups) = select_bursts(long_name, group, out_rtt, burst_sep, sburst, eburst,
                                                     out_files, out_groups)

                    if sfil.is_in(rev_name):
                        if ts_correct == '1':
                            rev_out_rtt = adjust_timestamps(test_id, rev_out_rtt, dst, ' ',
                                                            out_dir)
                        (out_files,
                         out_groups) = select_bursts(long_rev_name, group, rev_out_rtt, burst_sep, sburst,
                                                     eburst, out_files, out_groups)

        group += 1

    return (test_id_arr, out_files, out_groups)
## Extract RTT for flows using SPP
## SEE _extract_rtt()
@task
def extract_rtt(test_id='', out_dir='', replot_only='0', source_filter='',
                udp_map='', ts_correct='1', burst_sep='0.0', sburst='1', eburst='0'):
    "Extract RTT of flows with SPP"

    # delegate all work to the internal implementation
    _extract_rtt(test_id=test_id, out_dir=out_dir, replot_only=replot_only,
                 source_filter=source_filter, udp_map=udp_map,
                 ts_correct=ts_correct, burst_sep=burst_sep,
                 sburst=sburst, eburst=eburst)

    # done
    puts('\n[MAIN] COMPLETED extracting RTTs %s \n' % test_id)
## Plot RTT for flows using SPP
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param min_values Minimum number of data points in file, if fewer points
#                   the file is ignored
# @param udp_map Map that defines unidirectional UDP flows to combine. Format:
#                <ip1>,<port1>:<ip2>,<port2>[;<ip3>,<port3>:<ip4>,<port4>]*
# @param omit_const '0' don't omit anything,
#                   '1' omit any series that are 100% constant
#                   (e.g. because there was no data flow)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param stime Start time of plot window in seconds
#              (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds
#              (by default 0.0 = end of experiment)
# @param out_name Name prefix for resulting pdf file
# @param pdf_dir Output directory for pdf files (graphs), if not specified it is
#                the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
#                   '1' correct timestamps based on clock offsets estimated
#                   from broadcast pings
# @param plot_params Set env parameters for plotting
# @param plot_script Specify the script used for plotting, must specify full path
# @param burst_sep '0' plot seq numbers as they come, relative to 1st seq number
#                  > '0' plot seq numbers relative to 1st seq number after gaps
#                  of more than burst_sep milliseconds (e.g. incast query/response bursts)
#                  < 0, plot seq numbers relative to 1st seq number after each abs(burst_sep)
#                  seconds since the first burst @ t = 0 (e.g. incast query/response bursts)
# @param sburst Start plotting with burst N (bursts are numbered from 1)
# @param eburst End plotting with burst N (bursts are numbered from 1)
@task
def analyse_rtt(test_id='', out_dir='', replot_only='0', source_filter='',
                min_values='3', udp_map='', omit_const='0', ymin='0', ymax='0',
                lnames='', stime='0.0', etime='0.0', out_name='', pdf_dir='',
                ts_correct='1', plot_params='', plot_script='', burst_sep='0.0',
                sburst='1', eburst='0'):
    "Plot RTT of flows with SPP"

    # extract the RTT data (one .rtts file per flow / per burst)
    (test_id_arr,
     out_files,
     out_groups) = _extract_rtt(test_id, out_dir, replot_only,
                                source_filter, udp_map, ts_correct,
                                burst_sep, sburst, eburst)

    # drop series with too few data points to be meaningful
    (out_files, out_groups) = filter_min_values(out_files, out_groups, min_values)
    out_name = get_out_name(test_id_arr, out_name)

    burst_sep = float(burst_sep)
    if burst_sep == 0.0:
        # single time series per flow
        plot_time_series(out_name, out_files, 'SPP RTT (ms)', 2, 1000.0, 'pdf',
                         out_name + '_spprtt', pdf_dir=pdf_dir, omit_const=omit_const,
                         ymin=float(ymin), ymax=float(ymax), lnames=lnames,
                         stime=stime, etime=etime, groups=out_groups, plot_params=plot_params,
                         plot_script=plot_script, source_filter=source_filter)
    else:
        # each trial has multiple files containing data from separate bursts
        # detected within the trial
        plot_incast_ACK_series(out_name, out_files, 'SPP RTT (ms)', 2, 1000.0, 'pdf',
                               out_name + '_spprtt', pdf_dir=pdf_dir, aggr='',
                               omit_const=omit_const, ymin=float(ymin), ymax=float(ymax),
                               lnames=lnames, stime=stime, etime=etime, groups=out_groups, burst_sep=burst_sep,
                               sburst=int(sburst), plot_params=plot_params, plot_script=plot_script,
                               source_filter=source_filter)

    # done
    puts('\n[MAIN] COMPLETED plotting RTTs %s \n' % out_name)
## Extract data from siftr files
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param attributes Comma-separated list of attributes to extract from siftr file,
#                   start index is 1
#                   (refer to siftr documentation for column description)
# @param out_file_ext Extension for the output file containing the extracted data
# @param post_proc Name of function used for post-processing the extracted data
# @param ts_correct '0' use timestamps as they are (default)
#                   '1' correct timestamps based on clock offsets estimated
#                   from broadcast pings
# @param io_filter 'i' only use statistics from incoming packets
#                  'o' only use statistics from outgoing packets
#                  'io' use statistics from incoming and outgoing packets
# @return Map of flow names to interim data file names and
#         map of file names and group IDs
def extract_siftr(test_id='', out_dir='', replot_only='0', source_filter='',
                  attributes='', out_file_ext='', post_proc=None,
                  ts_correct='1', io_filter='o'):

    out_files = {}
    out_groups = {}

    if io_filter != 'i' and io_filter != 'o' and io_filter != 'io':
        abort('Invalid parameter value for io_filter')
    if io_filter == 'io':
        # egrep alternation matching either direction
        io_filter = '(i|o)'

    test_id_arr = test_id.split(';')

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    group = 1
    for test_id in test_id_arr:

        # first process siftr files
        siftr_files = get_testid_file_list('', test_id,
                                           'siftr.log.gz', '', no_abort=True)

        for siftr_file in siftr_files:
            # get input directory name and create result directory if necessary
            out_dirname = get_out_dir(siftr_file, out_dir)

            if replot_only == '0':
                # check that file is complete, i.e. we have the disable line
                with settings(warn_only=True):
                    last_line = local(
                        'zcat %s | tail -1 | grep disable_time_secs' %
                        siftr_file,
                        capture=True)
                if last_line == '':
                    abort('Incomplete siftr file %s' % siftr_file)

                # check that we have patched siftr (27 columns)
                cols = int(
                    local(
                        'zcat %s | head -2 | tail -1 | sed "s/,/ /g" | wc -w' %
                        siftr_file,
                        capture=True))
                if cols < 27:
                    abort('siftr needs to be patched to output ertt estimates')

            # we need to stop reading before the log disable line
            rows = str(int(
                local('zcat %s | wc -l | awk \'{ print $1 }\'' %
                      (siftr_file), capture=True)) - 3)

            # unique flows (cached, since the extraction is expensive)
            flows = lookup_flow_cache(siftr_file)
            if flows == None:
                flows = _list(
                    local(
                        'zcat %s | grep -v enable | head -%s | '
                        'egrep "^%s" | '
                        'cut -d\',\' -f 4,5,6,7 | LC_ALL=C sort -u' %
                        (siftr_file, rows, io_filter), capture=True))

                append_flow_cache(siftr_file, flows)

            for flow in flows:
                src, src_port, dst, dst_port = flow.split(',')

                # get external and internal addresses
                src, src_internal = get_address_pair_analysis(test_id, src, do_abort='0')
                dst, dst_internal = get_address_pair_analysis(test_id, dst, do_abort='0')

                if src == '' or dst == '':
                    continue

                flow_name = flow.replace(',', '_')
                # test id plus flow name
                if len(test_id_arr) > 1:
                    long_flow_name = test_id + '_' + flow_name
                else:
                    long_flow_name = flow_name

                out = out_dirname + test_id + '_' + flow_name + '_siftr.' + out_file_ext

                if replot_only == '0' or not os.path.isfile(out):
                    # extract timestamp plus requested attribute columns for
                    # this particular flow
                    local(
                        'zcat %s | grep -v enable | head -%s | '
                        'egrep "^%s" | '
                        'cut -d\',\' -f 3,4,5,6,7,%s | '
                        'grep "%s" | cut -d\',\' -f 1,6- > %s' %
                        (siftr_file, rows, io_filter, attributes, flow, out))

                    if post_proc is not None:
                        post_proc(siftr_file, out)

                if sfil.is_in(flow_name):
                    if ts_correct == '1':
                        host = local(
                            'echo %s | sed "s/.*_\([a-z0-9\.]*\)_siftr.log.gz/\\1/"' %
                            siftr_file,
                            capture=True)
                        out = adjust_timestamps(test_id, out, host, ',', out_dir)

                    out_files[long_flow_name] = out
                    out_groups[out] = group

        group += 1

    return (out_files, out_groups)
## Guess web10g version (based on first file only!)
# @param test_id Test ID prefix of experiment to analyse
# @return Version string: '2.0.9' if the log has 128 columns, otherwise '2.0.7'
def guess_version_web10g(test_id=''):
    test_id_arr = test_id.split(';')
    test_id = test_id_arr[0]
    web10g_files = get_testid_file_list('', test_id,
                                        'web10g.log.gz', '', no_abort=True)

    # if there are no web10g files the following will return '2.0.7', but in this
    # case we don't care anyway
    try:
        web10g_file = web10g_files[0]
        colnum = int(local('zcat %s | sed -e "s/,/ /g" | head -1 | wc -w' % web10g_file,
                           capture=True))
        if colnum == 122:
            return '2.0.7'
        elif colnum == 128:
            return '2.0.9'
        else:
            return '2.0.7'
    except Exception:
        # bug fix: was a bare 'except:' which also swallowed SystemExit and
        # KeyboardInterrupt; fall back to the default version on missing
        # files (IndexError) or unparseable output (ValueError)
        return '2.0.7'
## Extract data from web10g files
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param attributes Comma-separated list of attributes to extract from web10g file,
#                   start index is 1
#                   (refer to web10g documentation for column description)
# @param out_file_ext Extension for the output file containing the extracted data
# @param post_proc Name of function used for post-processing the extracted data
# @param ts_correct '0' use timestamps as they are (default)
#                   '1' correct timestamps based on clock offsets estimated
#                   from broadcast pings
# @return Map of flow names to interim data file names and
#         map of file names and group IDs
def extract_web10g(test_id='', out_dir='', replot_only='0', source_filter='',
                   attributes='', out_file_ext='', post_proc=None,
                   ts_correct='1'):

    out_files = {}
    out_groups = {}

    test_id_arr = test_id.split(';')

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    group = 1
    for test_id in test_id_arr:

        # process web10g files
        web10g_files = get_testid_file_list('', test_id,
                                            'web10g.log.gz', '', no_abort=True)

        for web10g_file in web10g_files:
            # get input directory name and create result directory if necessary
            out_dirname = get_out_dir(web10g_file, out_dir)

            # check for errors, unless we replot
            # make sure we have exit status 0 for this, hence the final echo
            if replot_only == '0':
                # bug fix: a '|' was missing between grep -v "Timestamp" and
                # egrep, so grep treated 'egrep' and '[a-z]+' as file names
                # and the error check always produced spurious output
                errors = local(
                    'zcat %s | grep -v "runbg_wrapper.sh" | grep -v "Timestamp" | '
                    'egrep "[a-z]+" ; echo -n ""' %
                    web10g_file,
                    capture=True)
                if errors != '':
                    warn('Errors in %s:\n%s' % (web10g_file, errors))

            # unique flows
            # the sed command here suppresses the last line, cause that can be
            # incomplete
            flows = lookup_flow_cache(web10g_file)
            if flows == None:
                flows = _list(
                    local(
                        'zcat %s | egrep -v "[a-z]+" | sed -n \'$!p\' | '
                        'cut -d\',\' -f 3,4,5,6 | LC_ALL=C sort -u' %
                        (web10g_file),
                        capture=True))

                append_flow_cache(web10g_file, flows)

            for flow in flows:
                src, src_port, dst, dst_port = flow.split(',')

                # get external and internal addresses
                src, src_internal = get_address_pair_analysis(test_id, src, do_abort='0')
                dst, dst_internal = get_address_pair_analysis(test_id, dst, do_abort='0')

                if src == '' or dst == '':
                    continue

                flow_name = flow.replace(',', '_')
                # test id plus flow name
                if len(test_id_arr) > 1:
                    long_flow_name = test_id + '_' + flow_name
                else:
                    long_flow_name = flow_name

                out = out_dirname + test_id + '_' + flow_name + '_web10g.' + out_file_ext

                if replot_only == '0' or not os.path.isfile(out):
                    # the first grep removes lines with netlink errors printed out
                    # or last incomplete lines (sed '$d')
                    # (not sure how to suppress them in web10g)
                    # the awk command here is a little trick to not print out lines when
                    # no data is flying around; basically it does suppress lines if
                    # there is no change with respect to the fields specified.
                    # this makes the output comparable to siftr where we only
                    # have output if data is flying around.
                    local('zcat %s | egrep -v "[a-z]+" | sed \'$d\' | '
                          'cut -d\',\' -f 1,3,4,5,6,7,8,13,14,%s | grep "%s" | '
                          'awk -F \',\' \'!a[$2$3$4$5$6$7$8$9]++\' | cut -d\',\' -f 1,10- > %s' %
                          (web10g_file, attributes, flow, out))

                    if post_proc is not None:
                        post_proc(web10g_file, out)

                if sfil.is_in(flow_name):
                    if ts_correct == '1':
                        host = local(
                            'echo %s | sed "s/.*_\([a-z0-9\.]*\)_web10g.log.gz/\\1/"' %
                            web10g_file,
                            capture=True)
                        out = adjust_timestamps(test_id, out, host, ',', out_dir)

                    out_files[long_flow_name] = out
                    out_groups[out] = group

        group += 1

    return (out_files, out_groups)
## SIFTR prints out very high cwnd (max cwnd?) values for some tcp algorithms
## at the start, remove them
# @param siftr_file Data extracted from siftr log
# @param out_file File name for post processed data
def post_proc_siftr_cwnd(siftr_file, out_file):
    """Drop the first two lines of *out_file* (bogus initial cwnd values).

    SIFTR reports very high cwnd values for some TCP algorithms at the
    start, so the first two data lines are removed in place.
    siftr_file is unused; the signature matches the other SIFTR
    post-processing callbacks.
    """
    # stage the trimmed data in a temp file, then move it back over out_file
    scratch = local('mktemp "tmp.XXXXXXXXXX"', capture=True)
    local(
        'cat %s | sed -e "1,2d\" > %s && mv %s %s' %
        (out_file, scratch, scratch, out_file))
## Extract cwnd over time
## The extracted files have an extension of .cwnd. The format is CSV with the
## columns:
## 1. Timestamp RTT measured (seconds.microseconds)
## 2. CWND
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again that is extracted already
# @param source_filter Filter on specific sources
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param io_filter 'i' only use statistics from incoming packets
# 'o' only use statistics from outgoing packets
# 'io' use statistics from incoming and outgoing packets
# (only effective for SIFTR files)
# @return Test ID list, map of flow names to interim data file names and
# map of file names and group IDs
def _extract_cwnd(test_id='', out_dir='', replot_only='0', source_filter='',
                  ts_correct='1', io_filter='o'):
    "Extract CWND over time"

    test_id_arr = test_id.split(';')
    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # SIFTR logs: column 9 holds the congestion window; the post-processing
    # callback strips the bogus initial values
    (files1,
     groups1) = extract_siftr(test_id,
                              out_dir,
                              replot_only,
                              source_filter,
                              '9',
                              'cwnd',
                              post_proc_siftr_cwnd,
                              ts_correct=ts_correct,
                              io_filter=io_filter)

    # web10g logs: column 26 holds the congestion window
    (files2,
     groups2) = extract_web10g(test_id,
                               out_dir,
                               replot_only,
                               source_filter,
                               '26',
                               'cwnd',
                               ts_correct=ts_correct)

    # merge SIFTR and web10g results
    # (dict(a.items() + b.items()) only works on Python 2 where items()
    # returns lists; update() is portable to Python 3 view objects)
    all_files = dict(files1)
    all_files.update(files2)
    all_groups = dict(groups1)
    all_groups.update(groups2)

    return (test_id_arr, all_files, all_groups)
## Extract cwnd over time
## SEE _extract_cwnd
@task
def extract_cwnd(test_id='', out_dir='', replot_only='0', source_filter='',
                 ts_correct='1', io_filter='o'):
    "Extract CWND over time"

    # thin task wrapper; all the work happens in the shared helper
    _extract_cwnd(test_id=test_id, out_dir=out_dir, replot_only=replot_only,
                  source_filter=source_filter, ts_correct=ts_correct,
                  io_filter=io_filter)

    # done
    puts('\n[MAIN] COMPLETED extracting CWND %s \n' % test_id)
## Analyse cwnd over time
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param min_values Minimum number of data points in file, if fewer points
# the file is ignored
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds
# (by default 0.0 = end of experiment)
# @param out_name Name prefix for resulting pdf file
# @param pdf_dir Output directory for pdf files (graphs), if not specified it is
# the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param io_filter 'i' only use statistics from incoming packets
# 'o' only use statistics from outgoing packets
# 'io' use statistics from incooming and outgoing packets
# (only effective for SIFTR files)
# @param plot_params Set env parameters for plotting
# @param plot_script specify the script used for plotting, must specify full path
@task
def analyse_cwnd(test_id='', out_dir='', replot_only='0', source_filter='',
                 min_values='3', omit_const='0', ymin='0', ymax='0', lnames='',
                 stime='0.0', etime='0.0', out_name='', pdf_dir='', ts_correct='1',
                 io_filter='o', plot_params='', plot_script=''):
    "Plot CWND over time"

    # extract per-flow cwnd data from SIFTR/web10g logs (or reuse existing
    # extractions when replot_only is '1')
    (test_id_arr,
     out_files,
     out_groups) = _extract_cwnd(test_id, out_dir, replot_only,
                                 source_filter, ts_correct, io_filter)

    # nothing to plot if no flow produced data
    if len(out_files) > 0:
        # drop flows with fewer than min_values data points
        (out_files, out_groups) = filter_min_values(out_files, out_groups, min_values)
        out_name = get_out_name(test_id_arr, out_name)
        # column 2 is the cwnd value; 0.001 scales bytes to kilobytes
        plot_time_series(out_name, out_files, 'CWND (k)', 2, 0.001, 'pdf',
                         out_name + '_cwnd', pdf_dir=pdf_dir, sep=",",
                         omit_const=omit_const, ymin=float(ymin), ymax=float(ymax),
                         lnames=lnames, stime=stime, etime=etime, groups=out_groups,
                         plot_params=plot_params, plot_script=plot_script,
                         source_filter=source_filter)

    # done
    puts('\n[MAIN] COMPLETED plotting CWND %s \n' % out_name)
## SIFTR values are in units of tcp_rtt_scale*hz, so we need to convert to milliseconds
# @param siftr_file Data extracted from siftr log
# @param out_file File name for post processed data
def post_proc_siftr_rtt(siftr_file, out_file):
    """Convert SIFTR RTT values in *out_file* (in place) to milliseconds.

    SIFTR logs RTT in units of tcp_rtt_scale*hz; both factors are read
    from the first (header) line of the gzipped siftr log.
    """
    # hz is the 4th "name=value" field of the siftr header line
    hz = local(
        'zcat %s | head -1 | awk \'{ print $4 }\' | cut -d\'=\' -f 2' %
        siftr_file,
        capture=True)
    # tcp_rtt_scale is the 5th "name=value" field of the header line
    tcp_rtt_scale = local(
        'zcat %s | head -1 | awk \'{ print $5 }\' | cut -d\'=\' -f 2' %
        siftr_file,
        capture=True)
    # dividing column 2 by this scaler yields milliseconds
    scaler = str(float(hz) * float(tcp_rtt_scale) / 1000)

    # XXX hmm maybe do the following in python
    # rewrite the CSV via a temp file: keep columns 1 and 3, rescale column 2
    tmp_file = local('mktemp "tmp.XXXXXXXXXX"', capture=True)
    local('cat %s | awk -v scaler=%s \'BEGIN { FS = "," } ; '
          '{ printf("%%s,%%.0f,%%s\\n", $1, $2/scaler, $3) }\' > %s && mv %s %s' %
          (out_file, scaler, tmp_file, tmp_file, out_file))
## Extract RTT over time estimated by TCP
## The extracted files have an extension of .tcp_rtt. The format is CSV with the
## columns:
## 1. Timestamp RTT measured (seconds.microseconds)
## 2. Smoothed RTT
## 3. Sample/Unsmoothed RTT
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again that is extracted already
# @param source_filter Filter on specific sources
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param io_filter 'i' only use statistics from incoming packets
# 'o' only use statistics from outgoing packets
# 'io' use statistics from incooming and outgoing packets
# (only effective for SIFTR files)
# @param web10g_version web10g version string (default is 2.0.9)
# @return Test ID list, map of flow names to interim data file names and
# map of file names and group IDs
def _extract_tcp_rtt(test_id='', out_dir='', replot_only='0', source_filter='',
                     ts_correct='1', io_filter='o', web10g_version='2.0.9'):
    "Extract RTT as seen by TCP (smoothed RTT)"

    test_id_arr = test_id.split(';')
    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # output smoothed rtt and improved sample rtt (patched siftr required),
    # post process to get rtt in milliseconds
    (files1,
     groups1) = extract_siftr(test_id,
                              out_dir,
                              replot_only,
                              source_filter,
                              '17,27',
                              'tcp_rtt',
                              post_proc_siftr_rtt,
                              ts_correct=ts_correct,
                              io_filter=io_filter)

    # output smoothed RTT and sample RTT in milliseconds
    # if the caller left the default version string, try to detect the
    # actual version from the logs; the sample-RTT column differs
    # between web10g 2.0.7 (45) and 2.0.9 (47)
    if web10g_version == '2.0.9':
        web10g_version = guess_version_web10g(test_id)

    if web10g_version == '2.0.7':
        data_columns = '23,45'
    elif web10g_version == '2.0.9':
        data_columns = '23,47'
    else:
        # unknown version: fall back to the 2.0.7 column layout
        data_columns = '23,45'

    (files2,
     groups2) = extract_web10g(test_id,
                               out_dir,
                               replot_only,
                               source_filter,
                               data_columns,
                               'tcp_rtt',
                               ts_correct=ts_correct)

    # merge SIFTR and web10g results
    # (dict(a.items() + b.items()) only works on Python 2 where items()
    # returns lists; update() is portable to Python 3 view objects)
    all_files = dict(files1)
    all_files.update(files2)
    all_groups = dict(groups1)
    all_groups.update(groups2)

    return (test_id_arr, all_files, all_groups)
## Extract RTT over time estimated by TCP
## SEE _extract_tcp_rtt
@task
def extract_tcp_rtt(test_id='', out_dir='', replot_only='0', source_filter='',
                    ts_correct='1', io_filter='o', web10g_version='2.0.9'):
    "Extract RTT as seen by TCP (smoothed RTT)"

    # thin task wrapper; all the work happens in the shared helper
    _extract_tcp_rtt(test_id=test_id, out_dir=out_dir, replot_only=replot_only,
                     source_filter=source_filter, ts_correct=ts_correct,
                     io_filter=io_filter, web10g_version=web10g_version)

    # done
    puts('\n[MAIN] COMPLETED extracting TCP RTTs %s \n' % test_id)
## Plot RTT estimated by TCP over time
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param min_values Datasets with fewer values won't be plotted
# @param smoothed '0' plot non-smooth RTT (enhanced RTT in case of FreeBSD),
# '1' plot smoothed RTT estimates (non enhanced RTT in case of FreeBSD)
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds
# (by default 0.0 = end of experiment)
# @param out_name Name prefix for resulting pdf file
# @param pdf_dir Output directory for pdf files (graphs), if not specified it is
# the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param io_filter 'i' only use statistics from incoming packets
# 'o' only use statistics from outgoing packets
# 'io' use statistics from incooming and outgoing packets
# (only effective for SIFTR files)
# @param web10g_version web10g version string (default is 2.0.9)
# @param plot_params Set env parameters for plotting
# @param plot_script Specify the script used for plotting, must specify full path
@task
def analyse_tcp_rtt(test_id='', out_dir='', replot_only='0', source_filter='',
                    min_values='3', smoothed='1', omit_const='0', ymin='0', ymax='0',
                    lnames='', stime='0.0', etime='0.0', out_name='', pdf_dir='',
                    ts_correct='1', io_filter='o', web10g_version='2.0.9',
                    plot_params='', plot_script=''):
    "Plot RTT as seen by TCP (smoothed RTT)"

    # extract TCP-estimated RTT data from SIFTR/web10g logs
    (test_id_arr,
     out_files,
     out_groups) = _extract_tcp_rtt(test_id, out_dir, replot_only,
                                    source_filter, ts_correct, io_filter,
                                    web10g_version)

    if len(out_files) > 0:
        (out_files, out_groups) = filter_min_values(out_files, out_groups,
                                                    min_values)
        out_name = get_out_name(test_id_arr, out_name)

        # column 2 of the extracted files holds the smoothed estimate,
        # column 3 the raw sample RTT; both are already in milliseconds
        if smoothed == '1':
            ylab = 'Smoothed TCP RTT (ms)'
            column = 2
            suffix = '_smooth_tcprtt'
        else:
            ylab = 'TCP RTT (ms)'
            column = 3
            suffix = '_tcprtt'

        plot_time_series(out_name, out_files, ylab, column, 1.0, 'pdf',
                         out_name + suffix, pdf_dir=pdf_dir, sep=",",
                         omit_const=omit_const, ymin=float(ymin),
                         ymax=float(ymax), lnames=lnames, stime=stime,
                         etime=etime, groups=out_groups,
                         plot_params=plot_params, plot_script=plot_script,
                         source_filter=source_filter)

    # done
    puts('\n[MAIN] COMPLETED plotting TCP RTTs %s \n' % out_name)
## Extract some TCP statistic (based on siftr/web10g output)
## The extracted files have an extension of .tcpstat_<num>, where <num> is the index
## of the statistic. The format is CSV with the columns:
## 1. Timestamp RTT measured (seconds.microseconds)
## 2. TCP statistic chosen
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again that is already extracted
# @param source_filter Filter on specific sources
# @param siftr_index Integer number of the column in siftr log files
# (note if you have sitfr and web10g logs, you must also
# specify web10g_index) (default = 9, CWND)
# @param web10g_index Integer number of the column in web10g log files (note if
# you have web10g and siftr logs, you must also specify siftr_index)
# (default = 26, CWND)
# example: analyse_tcp_stat(siftr_index=17,web10_index=23,...)
# would plot smoothed RTT estimates.
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param io_filter 'i' only use statistics from incoming packets
# 'o' only use statistics from outgoing packets
# 'io' use statistics from incooming and outgoing packets
# (only effective for SIFTR files)
# @return Test ID list, map of flow names to interim data file names and
# map of file names and group IDs
def _extract_tcp_stat(test_id='', out_dir='', replot_only='0', source_filter='',
                      siftr_index='9', web10g_index='26', ts_correct='1',
                      io_filter='o'):
    "Extract TCP Statistic"

    test_id_arr = test_id.split(';')
    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # extract the requested column from SIFTR logs; the output extension
    # encodes the column index (.tcpstat_<siftr_index>)
    (files1,
     groups1) = extract_siftr(test_id,
                              out_dir,
                              replot_only,
                              source_filter,
                              siftr_index,
                              'tcpstat_' + siftr_index,
                              ts_correct=ts_correct,
                              io_filter=io_filter)

    # extract the requested column from web10g logs
    (files2,
     groups2) = extract_web10g(test_id,
                               out_dir,
                               replot_only,
                               source_filter,
                               web10g_index,
                               'tcpstat_' + web10g_index,
                               ts_correct=ts_correct)

    # merge SIFTR and web10g results
    # (dict(a.items() + b.items()) only works on Python 2 where items()
    # returns lists; update() is portable to Python 3 view objects)
    all_files = dict(files1)
    all_files.update(files2)
    all_groups = dict(groups1)
    all_groups.update(groups2)

    return (test_id_arr, all_files, all_groups)
## Extract some TCP statistic (based on siftr/web10g output)
## SEE _extract_tcp_stat
@task
def extract_tcp_stat(test_id='', out_dir='', replot_only='0', source_filter='',
                     siftr_index='9', web10g_index='26', ts_correct='1',
                     io_filter='o'):
    "Extract TCP Statistic"

    # thin task wrapper; all the work happens in the shared helper
    _extract_tcp_stat(test_id=test_id, out_dir=out_dir, replot_only=replot_only,
                      source_filter=source_filter, siftr_index=siftr_index,
                      web10g_index=web10g_index, ts_correct=ts_correct,
                      io_filter=io_filter)

    # done
    puts('\n[MAIN] COMPLETED extracting TCP Statistic %s \n' % test_id)
## Plot some TCP statistic (based on siftr/web10g output)
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param min_values Minimum number of data points in file, if fewer points
# the file is ignored
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param siftr_index Integer number of the column in siftr log files
# (note if you have sitfr and web10g logs, you must also
# specify web10g_index) (default = 9, CWND)
# @param web10g_index Integer number of the column in web10g log files (note if
# you have web10g and siftr logs, you must also specify siftr_index)
# (default = 26, CWND)
# example: analyse_tcp_stat(siftr_index=17,web10_index=23,...)
# would plot smoothed RTT estimates.
# @param ylabel Label for y-axis in plot
# @param yscaler Scaler for y-axis values (must be a floating point number)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param stime Start time of plot window in seconds (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds (by default 0.0 = end of experiment)
# @param out_name Name prefix for resulting pdf file
# @param pdf_dir Output directory for pdf files (graphs), if not specified it is
# the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param io_filter 'i' only use statistics from incoming packets
# 'o' only use statistics from outgoing packets
# 'io' use statistics from incooming and outgoing packets
# (only effective for SIFTR files)
# @param plot_params Set env parameters for plotting
# @param plot_script Specify the script used for plotting, must specify full path
@task
def analyse_tcp_stat(test_id='', out_dir='', replot_only='0', source_filter='',
                     min_values='3', omit_const='0', siftr_index='9', web10g_index='26',
                     ylabel='', yscaler='1.0', ymin='0', ymax='0', lnames='',
                     stime='0.0', etime='0.0', out_name='', pdf_dir='', ts_correct='1',
                     io_filter='o', plot_params='', plot_script=''):
    "Compute TCP Statistic"

    # extract the chosen siftr/web10g columns (or reuse existing extractions
    # when replot_only is '1')
    (test_id_arr,
     out_files,
     out_groups) = _extract_tcp_stat(test_id, out_dir, replot_only, source_filter,
                                     siftr_index, web10g_index, ts_correct, io_filter)

    # nothing to plot if no flow produced data
    if len(out_files) > 0:
        # drop flows with fewer than min_values data points
        (out_files, out_groups) = filter_min_values(out_files, out_groups, min_values)
        out_name = get_out_name(test_id_arr, out_name)
        # column 2 of the extracted files holds the statistic; the output
        # file name encodes both column indices
        plot_time_series(out_name, out_files, ylabel, 2, float(yscaler), 'pdf',
                         out_name + '_tcpstat_' +
                         siftr_index + '_' + web10g_index,
                         pdf_dir=pdf_dir, sep=",", omit_const=omit_const,
                         ymin=float(ymin), ymax=float(ymax), lnames=lnames, stime=stime,
                         etime=etime, groups=out_groups, plot_params=plot_params,
                         plot_script=plot_script, source_filter=source_filter)

    # done
    puts('\n[MAIN] COMPLETED plotting TCP Statistic %s \n' % out_name)
## Extract packet sizes. Plot function computes throughput based on the packet sizes.
## The extracted files have an extension of .psiz. The format is CSV with the
## columns:
## 1. Timestamp RTT measured (seconds.microseconds)
## 2. Packet size (bytes)
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again that is already extracted
# @param source_filter Filter on specific sources
# @param link_len '0' throughput based on IP length (default),
# '1' throughput based on link-layer length
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @return Test ID list, map of flow names to interim data file names and
# map of file names and group IDs
def _extract_pktsizes(test_id='', out_dir='', replot_only='0', source_filter='',
                      link_len='0', ts_correct='1', total_per_experiment='0'):
    "Extract throughput for generated traffic flows"

    # input tcpdump captures are gzipped; output files get a .psiz extension
    ifile_ext = '.dmp.gz'
    ofile_ext = '.psiz'

    already_done = {}
    out_files = {}
    out_groups = {}

    test_id_arr = test_id.split(';')
    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    group = 1

    for test_id in test_id_arr:

        # first process tcpdump files (ignore router and ctl interface tcpdumps)
        tcpdump_files = get_testid_file_list('', test_id,
                                             ifile_ext,
                                             'grep -v "router.dmp.gz" | grep -v "ctl.dmp.gz"')

        for tcpdump_file in tcpdump_files:
            # get input directory name and create result directory if necessary
            out_dirname = get_out_dir(tcpdump_file, out_dir)
            dir_name = os.path.dirname(tcpdump_file)

            # unique flows
            # consult the flow cache first; on a miss, list TCP and UDP flows
            # with tcpdump and normalise each to "src,sport,dst,dport,proto"
            flows = lookup_flow_cache(tcpdump_file)
            if flows == None:
                flows = _list(local('zcat %s | tcpdump -nr - "tcp" | '
                                    'awk \'{ if ( $2 == "IP" ) { print $3 " " $5 " tcp" } }\' | '
                                    'sed "s/://" | '
                                    'sed "s/\.\([0-9]*\) /,\\1 /g" | sed "s/ /,/g" | '
                                    'LC_ALL=C sort -u' %
                                    tcpdump_file, capture=True))
                flows += _list(local('zcat %s | tcpdump -nr - "udp" | '
                                     'awk \'{ if ( $2 == "IP" ) { print $3 " " $5 " udp" } }\' | '
                                     'sed "s/://" | '
                                     'sed "s/\.\([0-9]*\) /,\\1 /g" | sed "s/ /,/g" | '
                                     'LC_ALL=C sort -u' %
                                     tcpdump_file, capture=True))

                append_flow_cache(tcpdump_file, flows)

            # since client sends first packet to server, client-to-server flows
            # will always be first
            for flow in flows:

                src, src_port, dst, dst_port, proto = flow.split(',')

                # get external and internal addresses
                src, src_internal = get_address_pair_analysis(test_id, src, do_abort='0')
                dst, dst_internal = get_address_pair_analysis(test_id, dst, do_abort='0')

                # skip flows whose endpoints cannot be resolved
                if src == '' or dst == '':
                    continue

                # flow name (forward direction) and reverse-direction name
                name = src_internal + '_' + src_port + \
                    '_' + dst_internal + '_' + dst_port
                rev_name = dst_internal + '_' + dst_port + \
                    '_' + src_internal + '_' + src_port
                # test id plus flow name
                if len(test_id_arr) > 1:
                    long_name = test_id + '_' + name
                    long_rev_name = test_id + '_' + rev_name
                else:
                    long_name = name
                    long_rev_name = rev_name

                # the two dump files
                dump1 = dir_name + '/' + test_id + '_' + src + ifile_ext
                dump2 = dir_name + '/' + test_id + '_' + dst + ifile_ext

                # tcpdump filters and output file names
                filter1 = 'src host ' + src_internal + ' && src port ' + src_port + \
                    ' && dst host ' + dst_internal + ' && dst port ' + dst_port
                filter2 = 'src host ' + dst_internal + ' && src port ' + dst_port + \
                    ' && dst host ' + src_internal + ' && dst port ' + src_port

                out_size1 = out_dirname + test_id + '_' + name + ofile_ext
                out_size2 = out_dirname + test_id + '_' + rev_name + ofile_ext

                # only process each flow pair once
                if long_name not in already_done and long_rev_name not in already_done:
                    if replot_only == '0' or not ( os.path.isfile(out_size1) and \
                                                   os.path.isfile(out_size2) ):
                        # make sure for each flow we get the packet sizes captured
                        # at the _receiver_, hence we use filter1 with dump2 ...
                        if link_len == '0':
                            # IP lengths: keep lines ending in ')' and strip it,
                            # leaving "<timestamp> <length>"
                            local(
                                'zcat %s | tcpdump -v -tt -nr - "%s" | '
                                'awk \'{ print $1 " " $NF }\' | grep ")$" | sed -e "s/)//" > %s' %
                                (dump2, filter1, out_size1))
                            local(
                                'zcat %s | tcpdump -v -tt -nr - "%s" | '
                                'awk \'{ print $1 " " $NF }\' | grep ")$" | sed -e "s/://" > %s' %
                                (dump1, filter2, out_size2)) if False else local(
                                'zcat %s | tcpdump -v -tt -nr - "%s" | '
                                'awk \'{ print $1 " " $NF }\' | grep ")$" | sed -e "s/)//" > %s' %
                                (dump1, filter2, out_size2))
                        else:
                            # link-layer lengths: field 9 of tcpdump -e output
                            # (colon stripped), preceded by the timestamp
                            local(
                                'zcat %s | tcpdump -e -tt -nr - "%s" | grep "ethertype IP" | '
                                'awk \'{ print $1 " " $9 }\' | sed -e "s/://" > %s' %
                                (dump2, filter1, out_size1))
                            local(
                                'zcat %s | tcpdump -e -tt -nr - "%s" | grep "ethertype IP" | '
                                'awk \'{ print $1 " " $9 }\' | sed -e "s/://" > %s' %
                                (dump1, filter2, out_size2))

                    already_done[long_name] = 1
                    already_done[long_rev_name] = 1

                # register the forward direction if it passes the source filter
                if sfil.is_in(name):
                    if ts_correct == '1':
                        out_size1 = adjust_timestamps(test_id, out_size1, dst, ' ', out_dir)
                    out_files[long_name] = out_size1
                    out_groups[out_size1] = group

                # register the reverse direction if it passes the source filter
                if sfil.is_in(rev_name):
                    if ts_correct == '1':
                        out_size2 = adjust_timestamps(test_id, out_size2, src, ' ', out_dir)
                    out_files[long_rev_name] = out_size2
                    out_groups[out_size2] = group

        # if desired compute aggregate packet length data for each experiment
        if total_per_experiment == '1':
            # collect all per-flow files belonging to the current group
            files_list = ''
            for name in out_files:
                if out_groups[out_files[name]] == group:
                    files_list += out_files[name] + ' '

            out_size1 = out_dirname + test_id + '_total' + ofile_ext
            # cat everything together and sort by timestamp
            local('cat %s | sort -k 1,1 > %s' % (files_list, out_size1))

            # replace all files for separate flows with total
            delete_list = []
            for name in out_files:
                if out_groups[out_files[name]] == group:
                    delete_list.append(name)

            for d in delete_list:
                del out_groups[out_files[d]]
                del out_files[d]

            name = test_id
            out_files[name] = out_size1
            out_groups[out_size1] = group

        group += 1

    return (test_id_arr, out_files, out_groups)
## Extract packet sizes. The plot function computes throughput based on the packet sizes.
## SEE _extract_pktsizes
@task
def extract_pktsizes(test_id='', out_dir='', replot_only='0', source_filter='',
                     link_len='0', ts_correct='1', total_per_experiment='0'):
    "Extract throughput for generated traffic flows"

    # thin task wrapper; all the work happens in the shared helper
    _extract_pktsizes(test_id=test_id, out_dir=out_dir, replot_only=replot_only,
                      source_filter=source_filter, link_len=link_len,
                      ts_correct=ts_correct,
                      total_per_experiment=total_per_experiment)

    # done
    puts('\n[MAIN] COMPLETED extracting packet sizes %s \n' % test_id)
## Plot throughput
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param min_values Minimum number of data points in file, if fewer points
# the file is ignored
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param link_len '0' throughput based on IP length (default),
# '1' throughput based on link-layer length
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds (by default 0.0 = end of experiment)
# @param out_name Name prefix for resulting pdf file
# @param pdf_dir Output directory for pdf files (graphs), if not specified it is
# the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param plot_params: set env parameters for plotting
# @param plot_script: specify the script used for plotting, must specify full path
# @param total_per_experiment '0' plot per-flow throughput (default)
# '1' plot total throughput
@task
def analyse_throughput(test_id='', out_dir='', replot_only='0', source_filter='',
                       min_values='3', omit_const='0', ymin='0', ymax='0', lnames='',
                       link_len='0', stime='0.0', etime='0.0', out_name='',
                       pdf_dir='', ts_correct='1', plot_params='', plot_script='',
                       total_per_experiment='0'):
    "Plot throughput for generated traffic flows"

    # extract per-flow (or aggregate) packet-size data
    (test_id_arr,
     out_files,
     out_groups) = _extract_pktsizes(test_id, out_dir, replot_only,
                                     source_filter, link_len, ts_correct,
                                     total_per_experiment)

    # per-flow plots get sorted flow keys; the aggregate plot does not
    sort_flowkey = '1' if total_per_experiment == '0' else '0'

    (out_files, out_groups) = filter_min_values(out_files, out_groups, min_values)
    out_name = get_out_name(test_id_arr, out_name)
    # column 2 holds packet sizes in bytes; 0.008 converts bytes to kbits,
    # aggr='1' makes the plot script aggregate into throughput
    plot_time_series(out_name, out_files, 'Throughput (kbps)', 2, 0.008, 'pdf',
                     out_name + '_throughput', pdf_dir=pdf_dir, aggr='1',
                     omit_const=omit_const, ymin=float(ymin), ymax=float(ymax),
                     lnames=lnames, stime=stime, etime=etime, groups=out_groups,
                     sort_flowkey=sort_flowkey,
                     plot_params=plot_params, plot_script=plot_script,
                     source_filter=source_filter)

    # done
    puts('\n[MAIN] COMPLETED plotting throughput %s \n' % out_name)
## Get list of experiment IDs
# @param exp_list List of all test IDs
# @param test_id Test ID prefix of experiment to analyse
def get_experiment_list(exp_list='', test_id=''):
    """Return the list of experiment IDs to process.

    A non-empty test_id takes precedence and yields a one-element list;
    otherwise the IDs are read, one per line, from the file exp_list.
    """
    if test_id != '':
        return [test_id]

    try:
        with open(exp_list) as handle:
            # splitlines() drops the trailing newlines
            return handle.read().splitlines()
    except IOError:
        abort('Cannot open file %s' % exp_list)
## Do all extraction
# @param exp_list List of all test IDs
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for result files
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param resume_id Resume analysis with this test_id (ignore all test_ids before this),
# only effective if test_id is not specified
# @param link_len '0' throughput based on IP length (default),
# '1' throughput based on link-layer length
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param io_filter 'i' only use statistics from incoming packets
# 'o' only use statistics from outgoing packets
# 'io' use statistics from incooming and outgoing packets
# (only effective for SIFTR files)
# @param web10g_version web10g version string (default is 2.0.9)
@task
def extract_all(exp_list='experiments_completed.txt', test_id='', out_dir='',
                replot_only='0', source_filter='', resume_id='',
                link_len='0', ts_correct='1', io_filter='o', web10g_version='2.0.9'):
    "Extract SPP RTT, TCP RTT, CWND and throughput statistics"

    experiments = get_experiment_list(exp_list, test_id)

    # when resuming, skip every experiment until resume_id is seen
    do_analyse = True
    if resume_id != '':
        puts('Resuming analysis with test_id %s' % resume_id)
        do_analyse = False

    for test_id in experiments:

        if test_id == resume_id:
            do_analyse = True

        if do_analyse:
            # run each extraction task for this experiment
            execute(extract_rtt, test_id, out_dir, replot_only, source_filter,
                    ts_correct=ts_correct)
            execute(extract_cwnd, test_id, out_dir, replot_only, source_filter,
                    ts_correct=ts_correct, io_filter=io_filter)
            execute(extract_tcp_rtt, test_id, out_dir, replot_only, source_filter,
                    ts_correct=ts_correct, io_filter=io_filter, web10g_version=web10g_version)
            execute(extract_pktsizes, test_id, out_dir, replot_only, source_filter,
                    link_len=link_len, ts_correct=ts_correct)
## Do all analysis
# @param exp_list List of all test IDs
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for result files
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param min_values Ignore flows with less output values
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param smoothed '0' plot non-smooth RTT (enhanced RTT in case of FreeBSD),
# '1' plot smoothed RTT estimates (non enhanced RTT in case of FreeBSD)
# @param resume_id Resume analysis with this test_id (ignore all test_ids before this),
# only effective if test_id is not specified
# @param lnames Semicolon-separated list of legend names
# @param link_len '0' throughput based on IP length (default),
# '1' throughput based on link-layer length
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds
# (by default 0.0 = end of experiment)
# @param out_name Name prefix for resulting pdf files
# @param pdf_dir Output directory for pdf files (graphs), if not specified it is
# the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param io_filter 'i' only use statistics from incoming packets
# 'o' only use statistics from outgoing packets
# 'io' use statistics from incooming and outgoing packets
# (only effective for SIFTR files)
# @param web10g_version web10g version string (default is 2.0.9)
# @param plot_params Parameters passed to plot function via environment variables
# @param plot_script Specify the script used for plotting, must specify full path
@task
def analyse_all(exp_list='experiments_completed.txt', test_id='', out_dir='',
                replot_only='0', source_filter='', min_values='3', omit_const='0',
                smoothed='1', resume_id='', lnames='', link_len='0', stime='0.0',
                etime='0.0', out_name='', pdf_dir='', ts_correct='1',
                io_filter='o', web10g_version='2.0.9', plot_params='', plot_script=''):
    "Compute SPP RTT, TCP RTT, CWND and throughput statistics"

    experiments = get_experiment_list(exp_list, test_id)

    # when resuming, skip every experiment until resume_id is seen
    do_analyse = True
    if resume_id != '':
        puts('Resuming analysis with test_id %s' % resume_id)
        do_analyse = False

    for test_id in experiments:

        if test_id == resume_id:
            do_analyse = True

        if do_analyse:
            # SPP RTT plot
            execute(analyse_rtt, test_id, out_dir, replot_only, source_filter,
                    min_values, omit_const=omit_const, lnames=lnames, stime=stime,
                    etime=etime, out_name=out_name, pdf_dir=pdf_dir,
                    ts_correct=ts_correct, plot_params=plot_params, plot_script=plot_script)
            # congestion window plot
            execute(analyse_cwnd, test_id, out_dir, replot_only, source_filter, min_values,
                    omit_const=omit_const, lnames=lnames, stime=stime, etime=etime,
                    out_name=out_name, pdf_dir=pdf_dir, ts_correct=ts_correct,
                    io_filter=io_filter, plot_params=plot_params, plot_script=plot_script)
            # TCP-estimated RTT plot
            execute(analyse_tcp_rtt, test_id, out_dir, replot_only, source_filter, min_values,
                    omit_const=omit_const, smoothed=smoothed, lnames=lnames,
                    stime=stime, etime=etime, out_name=out_name, pdf_dir=pdf_dir,
                    ts_correct=ts_correct, io_filter=io_filter, web10g_version=web10g_version,
                    plot_params=plot_params, plot_script=plot_script)
            # throughput plot
            execute(analyse_throughput, test_id, out_dir, replot_only, source_filter,
                    min_values, omit_const=omit_const, lnames=lnames, link_len=link_len,
                    stime=stime, etime=etime, out_name=out_name, pdf_dir=pdf_dir,
                    ts_correct=ts_correct, plot_params=plot_params, plot_script=plot_script)
## Read experiment IDs from file
# @param exp_list List of all test IDs (allows to filter out certain experiments,
# i.e. specific value combinations)
# @return List of experiment IDs
def read_experiment_ids(exp_list):
    """Read experiment IDs, one per line, from the file *exp_list*."""

    try:
        with open(exp_list) as handle:
            # read lines without trailing newlines
            experiments = handle.read().splitlines()
    except IOError:
        abort('Cannot open file %s' % exp_list)

    if len(experiments) < 1:
        abort('No experiment IDs specified')

    # strip off right white space
    return [entry.rstrip() for entry in experiments]
## Get path from first experiment in list
# @param experiments List of experiment ids
# @return Path name
def get_first_experiment_path(experiments):
    """Return the directory that holds the data files of the first
    experiment in the list.

    Aborts if no files can be found for that experiment (e.g. because
    the cached file list is stale).
    """
    # look up the (sorted) file list for the first experiment id
    file_list = get_testid_file_list('', experiments[0],
                                     '', 'LC_ALL=C sort')
    if not file_list:
        abort('Cannot find experiment %s\n'
              'Remove outdated teacup_dir_cache.txt if files were moved.' % experiments[0])
    return os.path.dirname(file_list[0])
## Build match string to match test IDs based on specified variables, and a second
## string to extract the test id prefix. Does not require access to the config;
## instead it tries to get the names from the file name and some specified prefix
# @param test_id_prefix Regular expression
# @param test_id Test ID of one experiment
# @param variables Semicolon-separated list of <var>=<value> where <value> means
# we only want experiments where <var> had the specific value
# @return match string to match test IDs, match string to extract test ID prefix
def build_match_strings(test_id='', variables='',
                        test_id_prefix=r'[0-9]{8}\-[0-9]{6}_experiment_'):
    """Build a regex that matches test IDs for the given variable values,
    plus a second regex used to extract the test ID prefix.

    Does not require access to the config; the parameter (variable) names
    are taken from the file name of one example experiment.

    @param test_id Test ID of one experiment (used as template)
    @param variables Semicolon-separated list of <var>=<value>; variables
                     listed here are pinned to the value, all others match
                     any value (wildcard)
    @param test_id_prefix Regular expression matching the test ID prefix
    @return (match string to match test IDs,
             match string to extract the test ID prefix)
    """
    var_dict = {}
    if variables != '':
        for var in variables.split(';'):
            name, val = var.split('=')
            var_dict[name] = val

    res = re.search(test_id_prefix, test_id)
    if res is None:
        abort('Cannot find test ID prefix in test ID %s' % test_id)

    # cut off the test_id_prefix part
    test_id = test_id[res.end():]
    # strip a leading underscore if present (startswith is safe for an
    # empty remainder, unlike indexing)
    if test_id.startswith('_'):
        test_id = test_id[1:]

    # now we have a number of parameter names and values separated by '_';
    # the even elements of the '_'-split are the parameter short names
    match_parts = []
    for name in test_id.split('_')[::2]:
        val = var_dict.get(name, '')
        if val == '':
            # variable not pinned to a value, so use a wildcard
            match_parts.append('(' + name + '_.*)')
        else:
            match_parts.append('(' + name + '_' + val + ')')

    match_str = '_'.join(match_parts)
    match_str2 = '(.*)_' + match_str  # test id prefix is before match_str
    match_str = test_id_prefix + match_str  # add test id prefix

    #print(match_str)
    #print(match_str2)
    return (match_str, match_str2)
## Filter out experiments based on the variables and also return
## test id prefix and list of labels to plot underneath x-axis
# @param experiments Experiment list
# @param match_str Match string to match experiment
# @param match_str2 Match string for test ID prefix extraction
# @return List of filtered experiments, test ID prefix, x-axis labels
def filter_experiments(experiments, match_str, match_str2):
    """Select the experiments matching match_str and build the labels
    plotted underneath the x-axis.

    @param experiments Experiment list
    @param match_str Match string to match experiments
    @param match_str2 Match string for test ID prefix extraction
    @return (filtered experiment list, test ID prefix, x-axis label list)
    """
    selected = []
    labels = []
    prefix = ''

    for exp_id in experiments:
        match = re.search(match_str, exp_id)
        if not match:
            continue
        selected.append(exp_id)
        # one label line per captured parameter group
        labels.append('\n'.join(map(str, match.groups())))
        # extract the test id prefix from the first matching experiment
        if prefix == '':
            pfx_match = re.search(match_str2, exp_id)
            if pfx_match:
                prefix = pfx_match.group(1)

    labels = [lab.replace('_', ' ') for lab in labels]

    return (selected, prefix, labels)
## Get plot parameters based on metric
# @param metric Metric name
# @param smoothed If '1' plot smoothed RTT, if '0' plot unsmoothed RTT
# @param ts_correct If '1' use file with corrected timestamps, if '0' use uncorrected file
# @param stat_index See analyse_tcp_stat
# @param dupacks See analyse_ackseq
# @param cum_ackseq See analyse_ackseq
# @param slowest_only See analyse_incast
# @return File extension, y-axis label, index of metric in file, scaler, separator,
# aggregation flag, difference flag
def get_metric_params(metric='', smoothed='0', ts_correct='1', stat_index='0', dupacks='0',
                      cum_ackseq='1', slowest_only='0'):
    """Return the plot parameters for the given metric.

    @param metric Metric name
    @param smoothed If '1' plot smoothed RTT, if '0' plot unsmoothed RTT
    @param ts_correct If '1' use file with corrected timestamps
    @param stat_index See analyse_tcp_stat
    @param dupacks See analyse_ackseq
    @param cum_ackseq See analyse_ackseq
    @param slowest_only See analyse_incast
    @return (file extension, y-axis label, column index, scaler, separator,
             aggregation flag, difference flag), or None for an unknown metric
    """
    # metrics whose parameters do not depend on any other argument:
    # metric -> (extension, y-label, column index, scaler, separator, aggr)
    fixed_params = {
        'throughput': ('.psiz', 'Throughput (kbps)', 2, 0.008, ' ', '1'),
        'spprtt': ('.rtts', 'SPP RTT (ms)', 2, 1000.0, ' ', '0'),
        'cwnd': ('.cwnd', 'CWND', 2, 1.0, ',', '0'),
        # column 5 is the time gap to the previous request
        'iqtime': ('.iqtimes', 'Inter-query time (ms)', 5, 1000.0, ' ', '0'),
        'pktloss': ('.loss', 'Packet loss (%)', 2, 1.0, ' ', '2'),
    }

    diff = '0'

    if metric in fixed_params:
        (ext, ylab, yindex, yscaler, sep, aggr) = fixed_params[metric]
    elif metric == 'tcprtt':
        ext = '.tcp_rtt'
        ylab = 'TCP RTT (ms)'
        # column 2 holds the smoothed estimate, column 3 the unsmoothed one
        yindex = 2 if smoothed == '1' else 3
        yscaler = 1.0
        sep = ','
        aggr = '0'
    elif metric == 'tcpstat':
        ext = '.tcpstat_' + stat_index
        ylab = 'TCP statistic ' + stat_index
        yindex = 2
        yscaler = 1.0
        sep = ','
        aggr = '0'
    elif metric == 'ackseq':
        ext = '.acks'
        sep = ' '
        if dupacks == '0':
            # plot bytes acknowledged
            if cum_ackseq == '1':
                ylab = 'Bytes acknowledged (Kbytes)'
            else:
                ylab = 'Bytes acknowledged (Kbytes/s)'
            yindex = 2
            yscaler = (1.0 / 1024.0)
        else:
            # plot duplicate ACKs
            if cum_ackseq == '1':
                ylab = 'Cummulative dupACKs'
            else:
                ylab = 'dupACKs per second'
            yindex = 3
            yscaler = 1.0
        # cumulative counters are plotted as-is, windowed data is
        # aggregated and differentiated
        if cum_ackseq == '1':
            aggr = '0'
            diff = '0'
        else:
            aggr = '1'
            diff = '1'
    elif metric == 'restime':
        # XXX cannot select the tcpdump times here at the moment
        ext = '.rtimes'
        ylab = 'Response time (s)'
        yindex = 3
        yscaler = 1.0
        sep = ' '
        aggr = '0'
        if slowest_only != '0':
            # the slowest file has only (time, response time) columns
            ext = 'rtimes.slowest'
            yindex = 2
    else:
        # unknown metric
        return None

    if ts_correct == '1' and metric != 'restime':
        ext += DATA_CORRECTED_FILE_EXT

    if metric == 'spprtt' or metric == 'ackseq':
        # select the all bursts file
        ext += '.0'
    elif metric == 'iqtime':
        # select the all responders file
        ext += '.all'

    return (ext, ylab, yindex, yscaler, sep, aggr, diff)
## Get extract function based on metric
# @param metric Metric name
# @param link_len See analyse_throughput
# @param stat_index See analyse_tcp_stat
# @param slowest_only See analyse_incast
# @param sburst Start plotting with burst N (bursts are numbered from 1)
# @param eburst End plotting with burst N (bursts are numbered from 1)
# @param query_host See analyse_incast_iqtimes
# @return extract function, keyword arguments to pass to extract function
def get_extract_function(metric='', link_len='0', stat_index='0', slowest_only='0',
                         sburst='1', eburst='0', query_host=''):
    """Return the extract function and its keyword arguments for a metric.

    @param metric Metric name
    @param link_len See analyse_throughput
    @param stat_index See analyse_tcp_stat
    @param slowest_only See analyse_incast
    @param sburst Start plotting with burst N (bursts are numbered from 1)
    @param eburst End plotting with burst N (bursts are numbered from 1)
    @param query_host See analyse_incast_iqtimes
    @return (extract function, keyword arguments to pass to it)
    """
    # single dispatch table: metric -> (extract function, extra kwargs)
    dispatch = {
        'throughput': (_extract_pktsizes,
                       {'link_len': link_len}),
        'spprtt': (_extract_rtt, {}),
        'tcprtt': (_extract_tcp_rtt, {}),
        'cwnd': (_extract_cwnd, {}),
        'tcpstat': (_extract_tcp_stat,
                    {'siftr_index': stat_index,
                     'web10g_index': stat_index}),
        'ackseq': (_extract_ackseq,
                   {'burst_sep': 0.0,
                    'sburst': sburst,
                    'eburst': eburst}),
        'restime': (_extract_incast,
                    {'sburst': sburst,
                     'eburst': eburst,
                     'slowest_only': slowest_only}),
        'iqtime': (_extract_incast_iqtimes,
                   {'cumulative': '0',
                    'by_responder': '0',
                    'query_host': query_host}),
        'pktloss': (_extract_pktloss, {}),
    }

    return dispatch[metric]
## Function that plots mean, median, boxplot of throughput, RTT and other metrics
## for different parameter combinations
## XXX currently can't reorder the experiment parameters, order is the one given by
## config.py (and in the file names)
# @param exp_list List of all test IDs (allows to filter out certain experiments,
# i.e. specific value combinations)
# @param res_dir Directory with result files from analyse_all
# @param out_dir Output directory for result files
# @param source_filter Filter on specific sources
# (number of filters must be smaller equal to 12)
# @param min_values Ignore flows with less output values / packets
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param metric Metric can be 'throughput', 'spprtt' (spp rtt), 'tcprtt' (unsmoothed tcp rtt),
# 'cwnd', 'tcpstat', with 'tcpstat' must specify siftr_index or web10g_index
# 'restime', 'ackseq', 'iqtime'
# @param ptype Plot type: 'mean', 'median', 'box' (boxplot)
# @param variables Semicolon-separated list of <var>=<value> where <value> means
# we only want experiments where <var> had the specific value
# @param out_name Name prefix for resulting pdf file
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param group_by_prefix Group by prefix instead of group by traffic flow
# @param omit_const_xlab_vars '0' show all variables in the x-axis labels,
# '1' omit constant variables in the x-axis labels
# @param pdf_dir Output directory for pdf files (graphs), if not specified it
# is the same as out_dir
# @param stime Start time of time window to analyse
# (by default 0.0 = start of experiment)
# @param etime End time of time window to analyse (by default 0.0 = end of
# experiment)
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param smoothed '0' plot non-smooth RTT (enhanced RTT in case of FreeBSD),
# '1' plot smoothed RTT estimates (non enhanced RTT in case of FreeBSD)
# @param link_len '0' throughput based on IP length (default),
# '1' throughput based on link-layer length
# @param replot_only '0' extract data
# '1' don't extract data again, just redo the plot
# @param plot_params Parameters passed to plot function via environment variables
# @param plot_script Specify the script used for plotting, must specify full path
# (default is config.TPCONF_script_path/plot_cmp_experiments.R)
# @param stat_index Integer number of the column in siftr/web10g log files
# need when metric is 'tcpstat'
# @param dupacks '0' to plot ACKed bytes vs time
# '1' to plot dupACKs vs time
# @param cum_ackseq '0' average per time window data
# '1' cumulative counter data
# @param merge_data '0' by default don't merge data
# '1' merge data for each experiment, i.e. merge statistics of all flows
# (merging does not make sense in every case, user need to decide)
# @param sburst Start plotting with burst N (bursts are numbered from 1)
# @param eburst End plotting with burst N (bursts are numbered from 1)
# @param test_id_prefix Prefix used for the experiments (used to get variables
# names from the file names
# @param slowest_only '0' plot all response times (metric restime)
# '1' plot only the slowest response times for each burst
# '2' plot time between first request and last response finished
# @param res_time_mode '0' normal plot (default)
# '1' plot nominal response times in addition box/median/mean of
# observed response times
# '2' plot ratio of median/mean (as per ptype) and nominal response
# time
# @param query_host Name of querier (only for iqtime metric)
@task
def analyse_cmpexp(exp_list='experiments_completed.txt', res_dir='', out_dir='',
                   source_filter='', min_values='3', omit_const='0', metric='throughput',
                   ptype='box', variables='', out_name='', ymin='0', ymax='0', lnames='',
                   group_by_prefix='0', omit_const_xlab_vars='0', replot_only='0',
                   pdf_dir='', stime='0.0', etime='0.0', ts_correct='1', smoothed='1',
                   link_len='0', plot_params='', plot_script='', stat_index='',
                   dupacks='0', cum_ackseq='1', merge_data='0', sburst='1',
                   eburst='0', test_id_prefix='[0-9]{8}\-[0-9]{6}_experiment_',
                   slowest_only='0', res_time_mode='0', query_host=''):
    """Compare metrics for different experiments

    Extracts the chosen metric for all experiments read from exp_list
    (unless res_dir already points at extracted data), filters and sorts
    the resulting data files and then invokes an R plot script
    (plot_cmp_experiments.R by default) that renders a mean, median or
    box plot. See the @param comment block preceding this function for
    the meaning of all parameters.
    """
    # validate plot type, metric and source filter before doing any work
    if ptype != 'box' and ptype != 'mean' and ptype != 'median':
        abort('ptype must be either box, mean or median')
    check = get_metric_params(metric, smoothed, ts_correct)
    if check == None:
        abort('Unknown metric %s specified' % metric)
    if source_filter == '':
        abort('Must specify at least one source filter')
    if len(source_filter.split(';')) > 12:
        abort('Cannot have more than 12 filters')
    # prevent wrong use of res_time_mode
    if metric != 'restime' and res_time_mode != '0':
        res_time_mode = '0'
    if ptype == 'box' and res_time_mode == '2':
        res_time_mode = '0'
    # XXX more param checking
    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)
    # read test ids
    experiments = read_experiment_ids(exp_list)
    # get path based on first experiment id
    dir_name = get_first_experiment_path(experiments)
    # if we haven't got the extracted data, run the extract method(s) first
    if res_dir == '':
        for experiment in experiments:
            (ex_function, kwargs) = get_extract_function(metric, link_len,
                                                         stat_index, sburst=sburst, eburst=eburst,
                                                         slowest_only=slowest_only, query_host=query_host)
            (dummy, out_files, out_groups) = ex_function(
                test_id=experiment, out_dir=out_dir,
                source_filter=source_filter,
                replot_only=replot_only,
                ts_correct=ts_correct,
                **kwargs)
        # extracted data lives next to the raw data unless out_dir is absolute
        if out_dir == '' or out_dir[0] != '/':
            res_dir = dir_name + '/' + out_dir
        else:
            res_dir = out_dir
    else:
        # a relative res_dir is interpreted relative to the experiment directory
        if res_dir[0] != '/':
            res_dir = dir_name + '/' + res_dir
    # make sure we have trailing slash
    res_dir = valid_dir(res_dir)
    if pdf_dir == '':
        pdf_dir = res_dir
    else:
        if pdf_dir[0] != '/':
            pdf_dir = dir_name + '/' + pdf_dir
        pdf_dir = valid_dir(pdf_dir)
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)
    #
    # build match string from variables
    #
    (match_str, match_str2) = build_match_strings(experiments[0], variables,
                                                  test_id_prefix)
    #
    # filter out the experiments to plot, generate x-axis labels, get test id prefix
    #
    (fil_experiments,
     test_id_pfx,
     xlabs) = filter_experiments(experiments, match_str, match_str2)
    #
    # get out data files based on filtered experiment list and source_filter
    #
    (ext,
     ylab,
     yindex,
     yscaler,
     sep,
     aggr,
     diff) = get_metric_params(metric, smoothed, ts_correct, stat_index, dupacks,
                               cum_ackseq, slowest_only)
    # environment variables that switch the R script into nominal/ratio
    # response time mode (only meaningful for metric 'restime')
    res_time_env = ''
    if res_time_mode == '1':
        res_time_env = 'NOMINAL_RES_TIME="1"'
    if res_time_mode == '2':
        if ptype == 'median':
            ylab = 'Median resp time / nominal resp time'
        elif ptype == 'mean':
            ylab = 'Mean resp time / nominal resp time'
        res_time_env += ' RATIO_RES_TIME="1"'
    leg_names = source_filter.split(';')
    # if we merge responders make sure we only use the merged files
    if merge_data == '1':
        # set label to indicate merged data
        leg_names = ['Merged data']
        # reset source filter so we match the merged file
        sfil.clear()
        source_filter = 'S_0.0.0.0_0'
        sfil = SourceFilter(source_filter)
    # collect the data file for each (experiment, flow) combination
    file_names = []
    for experiment in fil_experiments:
        out_files = {}
        _ext = ext
        files = get_testid_file_list('', experiment,
                                     '%s' % _ext,
                                     'LC_ALL=C sort', res_dir)
        if merge_data == '1':
            # change extension
            _ext += '.all'
            files = merge_data_files(files)
        #print(files)
        # the flow key (src_ip_port_dst_ip_port) is embedded in the file name
        match_str = '.*_([0-9\.]*_[0-9]*_[0-9\.]*_[0-9]*)[0-9a-z_.]*' + _ext
        for f in files:
            # print(f)
            res = re.search(match_str, f)
            #print(res.group(1))
            if res and sfil.is_in(res.group(1)):
                # only add file if enough data points
                rows = int(
                    local('wc -l %s | awk \'{ print $1 }\'' %
                          f, capture=True))
                if rows > int(min_values):
                    out_files[res.group(1)] = f
        #print(out_files)
        #print(leg_names)
        if len(out_files) < len(leg_names):
            abort(
                'No data files for some of the source filters for experiment %s' %
                experiment)
        sorted_files = sort_by_flowkeys(out_files, source_filter)
        for name, file_name in sorted_files:
            file_names.append(file_name)
    if group_by_prefix == '1':
        # group by test prefix (and flow)
        # first, get all test id prefixes
        test_id_pfxs = {}
        for experiment in fil_experiments:
            res = re.search(match_str2, experiment)
            if res:
                test_id_pfxs[res.group(1)] = 1
        # second, sort files so that same parameter combinations for different
        # prefixes are together
        # if we have multiple prefixes, create legend entry for each
        # prefix+flow combination
        _file_names = [''] * len(file_names)
        _leg_names = []
        pfx_cnt = len(test_id_pfxs)
        i = 0
        j = -1
        last_pfx = ''
        for name in file_names:
            # find the prefix this file name belongs to
            for p in test_id_pfxs:
                if name.find(p) > -1:
                    curr_pfx = p
                    break
            if curr_pfx != last_pfx:
                i = 0
                j += 1
                for l in leg_names:
                    _leg_names.append(curr_pfx + '-' + l)
            # interleave so the same parameter combination of all
            # prefixes ends up adjacent
            _file_names[i * pfx_cnt + j] = name
            i += 1
            last_pfx = curr_pfx
        file_names = _file_names
        leg_names = _leg_names
        # remove duplicates in the x-axis labels
        xlabs = list(set(xlabs))
    if lnames != '':
        lnames_arr = lnames.split(';')
        if len(lnames_arr) != len(leg_names):
            abort(
                'Number of legend names must be qual to the number of source filters')
        leg_names = lnames_arr
    # filter out unchanged variables in the x labels (need at least 2 labels)
    if omit_const_xlab_vars == '1' and len(xlabs) > 1:
        xlabs_arrs = {}
        xlabs_changed = {}
        for i in range(len(xlabs)):
            xlabs_arrs[i] = xlabs[i].split('\n')
        # a variable counts as changed if its value differs between any two labels
        for i in range(len(xlabs_arrs[0])):
            changed = False
            xlab_var = xlabs_arrs[0][i]
            for j in range(1, len(xlabs)):
                if xlabs_arrs[j][i] != xlab_var:
                    changed = True
                    break
            xlabs_changed[i] = changed
        # keep only the changed variables in each label
        for i in range(len(xlabs)):
            tmp = []
            for j in range(len(xlabs_arrs[i])):
                if xlabs_changed[j]:
                    tmp.append(xlabs_arrs[i][j].replace('_', ' ', 1))
            xlabs[i] = '\n'.join(tmp)
    print(leg_names)
    print(file_names)
    #
    # pass the data files and auxiliary info to plot function
    #
    if out_name != '':
        oprefix = out_name + '_' + test_id_pfx + '_' + metric + '_' + ptype
    else:
        oprefix = test_id_pfx + '_' + metric + '_' + ptype
    title = oprefix
    if plot_script == '':
        plot_script = 'R CMD BATCH --vanilla %s/plot_cmp_experiments.R' % \
            config.TPCONF_script_path
    # interface between this code and the plot function are environment variables
    # the following variables are passed to plot function:
    # TITLE: character string that is plotted over the graph
    # FNAMES: comma-separated list of file names (each file contains one date series,
    #         e.g. data for one flow). The format of each file is CSV-style, but the
    #         separator does not have to be a comma (can be set with SEP). The first
    #         column contains the timestamps. The second, third etc. columns contain
    #         data, but only one of these columns will be plotted (set with YINDEX).
    # LNAMES: comma-separated list of legend names. this list has the same length
    #         as FNAMES and each entry corresponds to data in file name with the
    #         same index in FNAMES
    # XLABS: comma-separated list of labels for the x-axis ticks, one for each parameter
    #        combination that is plotted
    # YLAB: y-axis label character string
    # YINDEX: index of data column in file to plot on y-axis (file can have more than
    #         one data column)
    # YSCALER: factor which is multiplied with each data value before plotting
    # SEP: column separator used in data file
    # OTYPE: type of output graph (default is 'pdf')
    # OPREFIX: the prefix (first part) of the graph file name
    # ODIR: directory where output files, e.g. pdfs are placed
    # AGGR: set to '1' means data is aggregated over time intervals, more specifically
    #       the data is summed over the time intervals (used to determine throughput
    #       over time windows based on packet lengths)
    #       set to '0' means plot data as is
    # OMIT_CONST: '0' don't omit anything,
    #             '1' omit any data series from plot that are 100% constant
    # PTYPE: the type of plot identified by name, it can be 'box', 'mean' or 'median'
    #        for the default R script
    # YMIN: minimum value on y-axis (for zooming in), default is 0
    # YMAX: maximum value on y-axis (for zooming in), default is 0 meaning the
    #       maximum value is determined from the data
    # STIME: start time on x-axis (for zooming in), default is 0.0 meaning the start
    #        of an experiment
    # ETIME: end time on x-axis (for zooming in), default is 0.0 meaning the end of an
    #        experiment a determined from the data
    #local('which R')
    local('TITLE="%s" FNAMES="%s" LNAMES="%s" XLABS="%s" YLAB="%s" YINDEX="%d" '
          'YSCALER="%f" SEP="%s" OTYPE="%s" OPREFIX="%s" ODIR="%s" AGGR="%s" DIFF="%s" '
          'OMIT_CONST="%s" PTYPE="%s" YMIN="%s" YMAX="%s" STIME="%s" ETIME="%s" %s '
          '%s '
          '%s %s%s_plot_cmp_experiments.Rout' %
          (title, ','.join(file_names), ','.join(leg_names), ','.join(xlabs), ylab,
           yindex, yscaler, sep, 'pdf', oprefix, pdf_dir, aggr, diff,
           omit_const, ptype, ymin, ymax, stime, etime, res_time_env, plot_params,
           plot_script, pdf_dir, oprefix))
    # remove the R log file unless debugging is enabled
    if config.TPCONF_debug_level == 0:
        local('rm -f %s%s_plot_cmp_experiments.Rout' % (pdf_dir, oprefix))
    # done
    puts('\n[MAIN] COMPLETED analyse_cmpexp %s \n' % test_id_pfx)
## Extract incast response times from httperf files
## The extracted files have an extension of .rtimes. The format is CSV with the
## columns:
## 1. Request timestamp (seconds.microseconds)
## 2. Burst number
## 3. Response time (seconds)
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again that is already extracted
# @param source_filter Filter on specific sources
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param sburst Start plotting with burst N (bursts are numbered from 1)
# @param eburst End plotting with burst N (bursts are numbered from 1)
# @param slowest_only '0' plot response times for individual responders
# '1' plot slowest response time across all responders
# '2' plot time between first request and last response finished
# @return Experiment ID list, map of flow names to file names, map of file names
# to group IDs
def _extract_incast(test_id='', out_dir='', replot_only='0', source_filter='',
                    ts_correct='1', sburst='1', eburst='0', slowest_only='0'):
    """Extract incast response times for generated traffic flows

    Parses the httperf incast logs (*_httperf_incast.log.gz) of one or
    more experiments (semicolon-separated test IDs) and writes one
    .rtimes file per querier/responder pair with the columns:
    request timestamp, burst number, response time (seconds).
    Each experiment becomes one group.
    Returns (experiment ID list, map of flow names to file names,
    map of file names to group IDs).
    """
    ifile_ext = 'httperf_incast.log.gz'
    ofile_ext = '.rtimes'
    # abort in case of responder timeout
    abort_extract = False
    out_files = {}
    out_groups = {}
    sburst = int(sburst)
    eburst = int(eburst)
    test_id_arr = test_id.split(';')
    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')
    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)
    # group number, one per experiment (test id)
    group = 1
    for test_id in test_id_arr:
        # first find httperf files (ignore router and ctl interface tcpdumps)
        log_files = get_testid_file_list('', test_id,
                                         ifile_ext, '')
        for log_file in log_files:
            # get input directory name and create result directory if necessary
            out_dirname = get_out_dir(log_file, out_dir)
            # get src ip from file name
            src = local(
                'echo %s | sed "s/.*_\([a-z0-9\.]*\)_[0-9]*_httperf_incast.log.gz/\\1/"' %
                log_file,
                capture=True)
            # don't know source port, use it to differentiate experiments
            # must use high port otherwise the later sorting will fail
            src_port = str(50000 + group)
            # get destination ip and port from log file
            responders = _list(
                local(
                    'zcat %s | grep "hash_enter" | grep -v localhost | cut -d" " -f 2,3' %
                    log_file, capture=True))
            # cnt is the id of the responder whose lines we extract in this pass
            cnt = 0
            for _resp in responders:
                dst = _resp.split(' ')[0]
                dst_port = _resp.split(' ')[1]
                # get external and internal addresses
                src, src_internal = get_address_pair_analysis(test_id, src, do_abort='0')
                dst, dst_internal = get_address_pair_analysis(test_id, dst, do_abort='0')
                #print(src, src_port, dst, dst_port)
                if src == '' or dst == '':
                    continue
                # flow name
                name = src_internal + '_' + src_port + \
                    '_' + dst_internal + '_' + dst_port
                # test id plus flow name
                if len(test_id_arr) > 1:
                    long_name = test_id + '_' + name
                else:
                    long_name = name
                if not sfil.is_in(name):
                    continue
                out_fname = out_dirname + test_id + '_' + name + ofile_ext
                out_files[long_name] = out_fname
                out_groups[out_fname] = group
                # only (re)extract if forced or the output file does not exist yet
                if replot_only == '0' or not os.path.isfile(out_fname) :
                    f = open(out_fname, 'w')
                    responses = _list(local('zcat %s | grep "incast_files"' %
                                            log_file, capture=True))
                    time = 0.0
                    bursts = {}
                    # NOTE(review): the field positions used below (2, 9, 11, 12)
                    # follow the "incast_files" line format of the patched
                    # httperf -- confirm against the httperf incast logging code
                    for response in responses:
                        request_ts = float(response.split()[0])
                        responder_id = int(response.split()[2])
                        response_time = response.split()[9]
                        interval = float(response.split()[11])
                        timed_out = response.split()[12]
                        if responder_id == cnt:
                            if not responder_id in bursts:
                                bursts[responder_id] = 0
                            bursts[responder_id] += 1
                            # do only write the times for burst >= sburst and burst <= eburst
                            # but sburst=0/eburst=0 means no lower/upper limit
                            if bursts[responder_id] >= sburst and \
                               (eburst == 0 or bursts[responder_id] <= eburst):
                                if timed_out == 'no':
                                    f.write('%f %i %s\n' % (request_ts, bursts[responder_id],
                                                            response_time))
                                else:
                                    # responder timed out: record NA and abort later
                                    f.write('%f NA NA\n' % time)
                                    abort_extract = True
                        time += interval
                    f.close()
                cnt += 1
        # abort but only after we fully processed the problematic experiment
        if abort_extract:
            abort('Responder timed out in experiment %s' % test_id)
        group += 1
    if slowest_only != '0':
        # collapse the per-responder files into one series per group
        (out_files, out_groups) = get_slowest_response_time(out_files, out_groups,
                                                            int(slowest_only) - 1)
    return (test_id_arr, out_files, out_groups)
## Extract incast
## SEE _extract_incast
@task
def extract_incast(test_id='', out_dir='', replot_only='0', source_filter='',
                   ts_correct='1', sburst='1', eburst='0'):
    "Extract incast response times for generated traffic flows"
    # delegate the actual work to the internal extraction function
    _extract_incast(test_id=test_id, out_dir=out_dir, replot_only=replot_only,
                    source_filter=source_filter, ts_correct=ts_correct,
                    sburst=sburst, eburst=eburst)
    # done
    puts('\n[MAIN] COMPLETED extracting incast response times %s\n' % test_id)
## Get slowest response time per burst
# @param out_files List of data files
# @param out_groups Map of files to groups
# @param mode '0' slowest response time
# '1' time between first request and last response finished
# @return Map of flow names to file names, map of file names to group IDs
def get_slowest_response_time(out_files, out_groups, mode=0):
    """Condense per-responder response time files into one series per group.

    For each group (experiment) all individual responder data files are
    read, removed from the maps and replaced by a single <name>.slowest
    file that contains one line per burst.

    @param out_files Map of flow names to data file names
    @param out_groups Map of data file names to group IDs
    @param mode 0 slowest response time per burst
                1 time between first request and last response finished
    @return Map of flow names to file names, map of file names to group IDs
    """
    for group in set(out_groups.values()):
        # per-group statistics; these must be reset for every group,
        # otherwise burst data from one experiment leaks into the
        # output files of the following experiments
        slowest = {}     # burst -> largest response time seen
        earliest = {}    # burst -> earliest request timestamp
        latest = {}      # burst -> latest response finish time
        burst_time = {}  # burst -> time the burst occurred (first timestamp)
        fname = ''
        # iterate over a snapshot of the keys since we delete entries
        # from out_files while looping
        for name in list(out_files.keys()):
            if out_groups[out_files[name]] != group:
                continue
            # read data file and update the per-burst statistics
            with open(out_files[name], 'r') as f:
                for line in f.readlines():
                    fields = line.split()
                    _time = float(fields[0])
                    _burst = float(fields[1])
                    # response time is in last column, but column number differs
                    # for httperf vs tcpdump extracted data
                    _res_time = float(fields[-1])
                    _time_finished = _time + _res_time
                    # use the first time as time the burst occurred
                    if _burst not in burst_time:
                        burst_time[_burst] = _time
                    if _burst not in slowest or _res_time > slowest[_burst]:
                        slowest[_burst] = _res_time
                    if _burst not in earliest or _time < earliest[_burst]:
                        earliest[_burst] = _time
                    if _burst not in latest or _time_finished > latest[_burst]:
                        latest[_burst] = _time_finished
            if fname == '':
                fname = out_files[name]
            # delete entries for single responders
            del out_groups[out_files[name]]
            del out_files[name]
        # derive the merged file name from the first responder file by
        # zeroing out the flow key, then append the .slowest extension
        fname = re.sub(r'_[0-9]*_[0-9]*\.[0-9]*\.[0-9]*\.[0-9]*_[0-9]*\.', '_0_0.0.0.0_0.', fname)
        fname += '.slowest'
        name = 'Experiment ' + str(group) + ' slowest'
        # write one value per burst
        with open(fname, 'w') as f:
            for _burst in sorted(slowest.keys()):
                if mode == 0:
                    # slowest response time of all responders
                    f.write('%f %f\n' % (burst_time[_burst], slowest[_burst]))
                else:
                    # time between first request and last response finished
                    f.write('%f %f\n' % (burst_time[_burst],
                                         latest[_burst] - earliest[_burst]))
        out_files[name] = fname
        out_groups[fname] = group

    return (out_files, out_groups)
## Plot incast response times
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param min_values Ignore flows with less than or equal output values / packets
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds (by default 0.0 = end of experiment)
# @param out_name Name prefix for resulting pdf file
# @param tcpdump '0' by default use the response times reported by httperf
# '1' plot response times based on tcpdump data (time between GET packet
# and last packet of the response)
# @param query_host If tcpdump=0 we don't need to set this parameter. however, tcpdump=1
# query_host needs to be set to the host name that was the querier.
# The name of the host as specified in the config file.
# @param pdf_dir Output directory for pdf files (graphs), if not specified it is
# the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param slowest_only '0' plot response times for individual responders
# '1' plot slowest response time across all responders
# '2' plot time between first request and last response finished
# @param boxplot '0' normal time series (default)
# '1' boxplot for each point in time
# @param sburst Start plotting with burst N (bursts are numbered from 1)
# @param eburst End plotting with burst N (bursts are numbered from 1)
# @param plot_params Set env parameters for plotting
# @param plot_script Specify the script used for plotting, must specify full path
@task
def analyse_incast(test_id='', out_dir='', replot_only='0', source_filter='',
                   min_values='3', omit_const='0', ymin='0', ymax='0', lnames='',
                   stime='0.0', etime='0.0', out_name='', tcpdump='0', query_host='',
                   pdf_dir='', ts_correct='1', slowest_only='0',
                   boxplot='0', sburst='1', eburst='0', plot_params='', plot_script=''):
    "Plot incast response times for generated traffic flows"
    pdf_name_part = '_restime'
    sort_flowkey = '1'

    if tcpdump == '1':
        # response times measured from tcpdump data
        # XXX no sburst and eburst for tcpdump yet
        if query_host == '':
            abort('Must specify query_host')
        extracted = _extract_incast_restimes(test_id, out_dir, replot_only,
                                             source_filter, ts_correct,
                                             query_host, slowest_only)
        yindex = 5
        ofile_ext = '.restimes'
    else:
        # response times as reported by httperf
        extracted = _extract_incast(test_id, out_dir, replot_only, source_filter,
                                    ts_correct, sburst, eburst, slowest_only)
        yindex = 3
        ofile_ext = '.rtimes'

    (test_id_arr, out_files, out_groups) = extracted

    if slowest_only != '0':
        pdf_name_part = '_restime_slowest'
        sort_flowkey = '0'
        # the slowest code produces an output file with only two columns
        # (time, response time)
        yindex = 2

    out_name = get_out_name(test_id_arr, out_name)
    plot_time_series(out_name, out_files, 'Response time (s)', yindex, 1.0, 'pdf',
                     out_name + pdf_name_part, pdf_dir=pdf_dir,
                     ymin=float(ymin), ymax=float(ymax),
                     lnames=lnames, stime=stime, etime=etime,
                     groups=out_groups, sort_flowkey=sort_flowkey,
                     boxplot=boxplot, plot_params=plot_params, plot_script=plot_script,
                     source_filter=source_filter)
    # done
    puts('\n[MAIN] COMPLETED plotting incast response times %s\n' % out_name)
## Extract_dupACKs_bursts
# @param acks_file Full path to a specific .acks file which is to be parsed
# for dupACKs and (optionally) extract sequence of ACK bursts
# @param burst_sep =0, Just calculate running total of dupACKs and create acks_file+".0" output file
# < 0, extract bursts into acks_file+".N" outputfiles (for burst N),
# where burst starts @ t=0 and then burst_sep seconds after start of previous burst
# > 0, extract bursts into acks_file+".N" outputfiles (for burst N)
# where burst starts @ t=0 and then burst_sep seconds after end of previous burst
# @return Vector of file names (one for each file generated)
#
# First task is to calculate the number of duplicate ACKs. Define
# them as ACKs whose sequence number is unchanged from the immediately
# preceding ACK.
#
# Generate .acks.0 file with this format:
#
# <time> <ack_seq_no> <cumulative_dupACK_count>
#
#
#If burst_sep != 0 then we try to further subdivide into "bursts"
#
# Output is multiple .acks.N files, containing only the lines for
# burst N:
#
# <time> <ack_seq_no> <cumulative_dupACK_count>
#
# The <ack_seq_no> starts at 0 for burst 1 (since the first
# ACK is assumed to be the end of the handshake rather than ACK'ing
# a Data packet), but starts at a small non-zero value for the first
# ACK of bursts 2..N.
#
# The <cumulative_dupACK_count> restarts at 0 for each burst.
#
# NOTE: This function relies on there being no re-ordering of ACK packets on
# the return path.
#
def extract_dupACKs_bursts(acks_file='', burst_sep=0):
    """Parse a .acks file, count duplicate ACKs and optionally split the
    ACK stream into bursts.

    A dupACK is an ACK whose sequence number is unchanged from the
    immediately preceding ACK. Relies on there being no re-ordering of
    ACK packets on the return path, and on the first ACK line having a
    zero (relative) sequence number.

    Keyword arguments:
    acks_file -- full path of the .acks file to parse; each input line is
                 '<time> <ack_seq_no>'
    burst_sep -- 0: just keep a running dupACK total, output <acks_file>.0
                 < 0: extract bursts into <acks_file>.N, a new burst starting
                      abs(burst_sep) seconds after the START of the previous burst
                 > 0: extract bursts into <acks_file>.N, a new burst starting
                      burst_sep seconds after the END of the previous burst

    Return a vector of the file names generated (one per file); each output
    line is '<time> <ack_seq_no relative to burst start> <cumulative dupACKs>'.
    The dupACK count restarts at 0 for each burst.
    """
    # New filenames (source file + ".0" or ".1", ".2", ... ".N" for bursts)
    new_fnames = []
    # Current burst number (bursts are numbered from 1)
    burstN = 1
    # Timestamp of the first ACK of the current burst (-1 = not yet seen)
    firstTS = -1
    # Current output file handle, tracked so it can be closed in all cases
    out_f = None
    try:
        # First read the entire contents of a .acks file
        with open(acks_file) as f:
            _acks = f.readlines()

        if burst_sep != 0 :
            # Create the first .acks.N output file
            out_f = open(acks_file+"."+"1","w")
            new_fnames.append(acks_file+"."+"1")
        else:
            # Single output file holding the running dupACK total
            out_f = open(acks_file+"."+"0","w")
            new_fnames.append(acks_file+"."+"0")

        # Now walk through every line of the .acks file
        for oneline in _acks:
            # ackdetails[0] is the timestamp, ackdetails[1] is the seq number
            ackdetails = oneline.split()

            if firstTS == -1 :
                # This is first time through the loop, so set some baseline
                # values for later offsets
                firstTS = ackdetails[0]
                prev_ACKTS = firstTS
                firstBytes = 0

            # Is this ACK a dupACK ?
            if int(ackdetails[1]) == 0 :
                # Only the first ACK line has zero seq number. Special case,
                # reset dupACKs count
                dupACKs = 0
                prev_seqno = ackdetails[1]
            else:
                # Define dupACK as an ACK with unchanged seq number wrt preceding ACK
                if (int(ackdetails[1]) - int(prev_seqno)) == 0 :
                    dupACKs += 1

            # If burst_sep == 0 the only thing we're calculating is a
            # cumulative running total of dupACKs, so we only do burst
            # identification if burst_sep != 0
            if burst_sep != 0 :
                if burst_sep < 0 :
                    # ack_gap is time since first ACK of this burst
                    # (i.e. relative to firstTS)
                    ack_gap = float(ackdetails[0]) - float(firstTS)
                else:
                    # ack_gap is time since previous ACK in this burst
                    # (i.e. relative to prev_ACKTS)
                    ack_gap = float(ackdetails[0]) - float(prev_ACKTS)

                # New burst begins when time between this ACK and previous
                # exceeds abs(burst_sep)
                if (ack_gap >= abs(burst_sep)) :
                    # We've found the first ACK of the _next_ burst.
                    # Close previous burst output file
                    out_f.close()
                    # Move on to the next burst
                    burstN += 1
                    print ("Burst: %3i, ends at %f sec, data: %i bytes, gap: %3.6f sec, dupACKs: %i" %
                    ( (burstN-1),  float(prev_ACKTS), int(prev_seqno) - int(firstBytes), ack_gap, dupACKs ) )
                    # Reset firstTS to the beginning (first timestamp) of this new burst
                    firstTS = ackdetails[0]
                    # The sequence number of first ACK of bursts 2...N must be considered
                    # relative to LAST seq number of PREVIOUS burst in order to calculate
                    # how many bytes were fully sent in bursts 2...N.
                    firstBytes = prev_seqno
                    # Reset the dupACKs counter
                    dupACKs = 0
                    # Create the next .acks.N output file
                    out_f = open(acks_file+"."+str(burstN),"w")
                    new_fnames.append(acks_file+"."+str(burstN))

            # How many bytes were ACK'ed since beginning? (Of entire file or of burst N)
            # This must be calculated _after_ firstBytes is potentially reset on
            # the boundary between bursts.
            bytes_gap = int(ackdetails[1]) - int(firstBytes)

            # Write to burst-specific output file
            # <time> <ACK seq number> <dupACK count>
            out_f.write(ackdetails[0]+" "+str(bytes_gap)+" "+str(dupACKs)+"\n")

            # Store the seq number for next time around the loop
            prev_seqno = ackdetails[1]
            prev_ACKTS = ackdetails[0]

    except IOError:
        print('extract_dupACKs_bursts(): File access problem while working on %s' % acks_file)
    finally:
        # Fix: guarantee the (last) output file is closed even if an
        # exception interrupts processing (it used to leak in that case)
        if out_f is not None:
            out_f.close()

    return new_fnames
## Extract cumulative bytes ACKnowledged and cumulative dupACKs
## Intermediate files end in ".acks", ".acks.N", ".acks.tscorr" or ".acks.tscorr.N"
## XXX move sburst and eburst to the plotting task and here extract all?
# @param test_id Semicolon-separated list of test ID prefixes of experiments to analyse
# @param out_dir Output directory for results
# @param replot_only '1' don't extract raw ACK vs time data per test_ID if already done,
# but still re-calculate dupACKs and bursts (if any) before plotting results
# '0' always extract raw data
# @param source_filter Filter on specific flows to process
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param burst_sep '0' plot seq numbers as they come, relative to 1st seq number
# > '0' plot seq numbers relative to 1st seq number after gaps
# of more than burst_sep milliseconds (e.g. incast query/response bursts)
# < 0, plot seq numbers relative to 1st seq number after each abs(burst_sep)
# seconds since the first burst @ t = 0 (e.g. incast query/response bursts)
# @param sburst Start plotting with burst N (bursts are numbered from 1)
# @param eburst End plotting with burst N (bursts are numbered from 1)
# @param total_per_experiment '0' per-flow data (default)
# '1' total data
# @return Experiment ID list, map of flow names to file names, map of file names to group IDs
def _extract_ackseq(test_id='', out_dir='', replot_only='0', source_filter='',
                    ts_correct='1', burst_sep='0.0',
                    sburst='1', eburst='0', total_per_experiment='0'):
    """Extract cumulative bytes ACKnowledged vs time / extract incast bursts.

    For each TCP flow of each experiment, extract the absolute ACK sequence
    numbers captured at the receiver into '.acks' intermediate files, then
    run extract_dupACKs_bursts() to add dupACK counts and (optionally) split
    the data into bursts. If requested, aggregate the per-flow data into one
    total per experiment (burst_sep=0 only).

    See the doc comments above this function for the parameter semantics.
    Bursts are numbered from 1; eburst=0 means 'up to the last burst'.

    Return a tuple (experiment ID list,
                    map of flow names to file name (or to a list of file
                    names when burst splitting is enabled),
                    map of file names to group IDs).
    """
    ifile_ext = '.dmp.gz'
    ofile_ext = '.acks'

    # burst selection bounds are numeric from here on
    sburst = int(sburst)
    eburst = int(eburst)
    burst_sep = float(burst_sep)

    already_done = {}
    out_files = {}
    out_groups = {}

    test_id_arr = test_id.split(';')
    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    group = 1
    for test_id in test_id_arr:

        # first process tcpdump files (ignore router and ctl interface tcpdumps)
        tcpdump_files = get_testid_file_list('', test_id,
                                             ifile_ext,
                                             'grep -v "router.dmp.gz" | grep -v "ctl.dmp.gz"')

        for tcpdump_file in tcpdump_files:
            # get input directory name and create result directory if necessary
            dir_name = os.path.dirname(tcpdump_file)
            out_dirname = get_out_dir(tcpdump_file, out_dir)

            # unique flows
            flows = lookup_flow_cache(tcpdump_file)
            if flows is None:
                flows = _list(local('zcat %s | tcpdump -nr - "tcp" | '
                                    'awk \'{ if ( $2 == "IP" ) { print $3 " " $5 " tcp" } }\' | '
                                    'sed "s/://" | '
                                    'sed "s/\.\([0-9]*\) /,\\1 /g" | sed "s/ /,/g" | '
                                    'LC_ALL=C sort -u' %
                                    tcpdump_file, capture=True))

                append_flow_cache(tcpdump_file, flows)

            # since client sends first packet to server, client-to-server flows
            # will always be first
            for flow in flows:

                src, src_port, dst, dst_port, proto = flow.split(',')

                # get external and internal addresses
                src, src_internal = get_address_pair_analysis(test_id, src, do_abort='0')
                dst, dst_internal = get_address_pair_analysis(test_id, dst, do_abort='0')

                if src == '' or dst == '':
                    continue

                # flow name
                name = src_internal + '_' + src_port + \
                    '_' + dst_internal + '_' + dst_port
                rev_name = dst_internal + '_' + dst_port + \
                    '_' + src_internal + '_' + src_port
                # test id plus flow name
                if len(test_id_arr) > 1:
                    long_name = test_id + '_' + name
                    long_rev_name = test_id + '_' + rev_name
                else:
                    long_name = name
                    long_rev_name = rev_name

                # the two dump files
                dump1 = dir_name + '/' + test_id + '_' + src + ifile_ext
                dump2 = dir_name + '/' + test_id + '_' + dst + ifile_ext

                # tcpdump filters and output file names
                # 'tcp[tcpflags] == tcp-ack' rule to extract only ACK packets
                # (eliminate SYN and FIN, even if ACK also set)
                filter1 = 'src host ' + src_internal + ' && src port ' + src_port + \
                    ' && dst host ' + dst_internal + ' && dst port ' + dst_port + \
                    ' && tcp[tcpflags] == tcp-ack'
                filter2 = 'src host ' + dst_internal + ' && src port ' + dst_port + \
                    ' && dst host ' + src_internal + ' && dst port ' + src_port + \
                    ' && tcp[tcpflags] == tcp-ack'

                out_acks1 = out_dirname + test_id + '_' + name + ofile_ext
                out_acks2 = out_dirname + test_id + '_' + rev_name + ofile_ext

                if long_name not in already_done and long_rev_name not in already_done:
                    if replot_only == '0' or not ( os.path.isfile(out_acks1) and \
                                                   os.path.isfile(out_acks2) ):
                        # make sure for each flow we get the ACKs captured
                        # at the _receiver_, hence we use filter1 with dump2 ...
                        # Use "-S" option to tcpdump so ACK sequence numbers are always absolute
                        # Grab first ACK sequence numbers for later use as a baseline

                        baseACK1 = local(
                            'zcat %s | tcpdump -c 1 -S -tt -nr - "%s" | '
                            'awk \'{ FS=" " ; for(i=2;i<=NF;i++) { if ( $i == "ack") { print $(i+1) } } ; }\' | sed \'s/,//\' ' %
                            (dump2, filter1), capture=True)
                        baseACK2 = local(
                            'zcat %s | tcpdump -c 1 -S -tt -nr - "%s" | '
                            'awk \'{ FS=" " ; for(i=2;i<=NF;i++) { if ( $i == "ack") { print $(i+1) } } ; }\' | sed \'s/,//\' ' %
                            (dump1, filter2), capture=True)

                        #puts('\n[MAIN] BASEACKs %s %s\n' % (baseACK1, baseACK2))

                        # Now extract all ACK sequence numbers, normalised to baseACK{1,2}

                        local(
                            'zcat %s | tcpdump -S -tt -nr - "%s" | '
                            'awk \'{ FS=" " ; for(i=2;i<=NF;i++) { if ( $i == "ack") { print $1 " " $(i+1) - %s } } ; }\' | sed \'s/,//\' > %s' %
                            (dump2, filter1, baseACK1, out_acks1))
                        local(
                            'zcat %s | tcpdump -S -tt -nr - "%s" | '
                            'awk \'{ FS=" " ; for(i=2;i<=NF;i++) { if ( $i == "ack") { print $1 " " $(i+1) - %s } } ; }\' | sed \'s/,//\' > %s' %
                            (dump1, filter2, baseACK2, out_acks2))

                    already_done[long_name] = 1
                    already_done[long_rev_name] = 1

                    if sfil.is_in(name):
                        if ts_correct == '1':
                            out_acks1 = adjust_timestamps(test_id, out_acks1, dst, ' ', out_dir)

                        # do the dupACK calculations and burst extraction here,
                        # return a new vector of one or more filenames, pointing to file(s) containing
                        # <time> <seq_no> <dupACKs>
                        #
                        out_acks1_dups_bursts = extract_dupACKs_bursts(acks_file = out_acks1,
                                                                       burst_sep = burst_sep)

                        # Incorporate the extracted .N files
                        # as a new, expanded set of filenames to be plotted.
                        # Update the out_files dictionary (key=interim legend name based on flow, value=file)
                        # and out_groups dictionary (key=file name, value=group)
                        if burst_sep == 0.0:
                            # Assume this is a single plot (not broken into bursts)
                            # The plot_time_series() function expects key to have a single string
                            # value rather than a vector. Take the first (and presumably only)
                            # entry in the vector returned by extract_dupACKs_bursts()
                            out_files[long_name] = out_acks1_dups_bursts[0]
                            out_groups[out_acks1_dups_bursts[0]] = group
                        else:
                            # This trial has been broken into one or more bursts.
                            # plot_incast_ACK_series() knows how to parse a key having a
                            # 'vector of strings' value.
                            # Filter the selection based on sburst/eburst nominated by user.
                            # Bug fix: work on local copies so the user-supplied
                            # sburst/eburst are not clobbered here; previously the
                            # first flow's burst count became the limit for ALL
                            # subsequent flows.
                            eb = eburst
                            if eb == 0 :
                                eb = len(out_acks1_dups_bursts)
                            # Catch case when eburst was set non-zero but also > number of actual bursts
                            eb = min(eb, len(out_acks1_dups_bursts))
                            sb = sburst
                            if sb <= 0 :
                                sb = 1
                            # Catch case where sburst set greater than eburst
                            if sb > eb :
                                sb = eb

                            out_files[long_name] = out_acks1_dups_bursts[sb-1:eb]
                            for tmp_f in out_acks1_dups_bursts[sb-1:eb] :
                                out_groups[tmp_f] = group

                    if sfil.is_in(rev_name):
                        if ts_correct == '1':
                            out_acks2 = adjust_timestamps(test_id, out_acks2, src, ' ', out_dir)

                        # do the dupACK calculations burst extraction here
                        # return a new vector of one or more filenames, pointing to file(s) containing
                        # <time> <seq_no> <dupACKs>
                        #
                        out_acks2_dups_bursts = extract_dupACKs_bursts(acks_file = out_acks2,
                                                                       burst_sep = burst_sep)

                        # Incorporate the extracted .N files
                        # as a new, expanded set of filenames to be plotted.
                        # Update the out_files dictionary (key=interim legend name based on flow, value=file)
                        # and out_groups dictionary (key=file name, value=group)
                        if burst_sep == 0.0:
                            # Assume this is a single plot (not broken into bursts)
                            # The plot_time_series() function expects key to have a single string
                            # value rather than a vector. Take the first (and presumably only)
                            # entry in the vector returned by extract_dupACKs_bursts()
                            out_files[long_rev_name] = out_acks2_dups_bursts[0]
                            out_groups[out_acks2_dups_bursts[0]] = group
                        else:
                            # This trial has been broken into bursts.
                            # plot_incast_ACK_series() knows how to parse a key having a
                            # 'vector of strings' value.
                            # Filter the selection based on sburst/eburst nominated by user
                            # (local copies again, see bug-fix note above).
                            eb = eburst
                            if eb == 0 :
                                eb = len(out_acks2_dups_bursts)
                            # Catch case when eburst was set non-zero but also > number of actual bursts
                            eb = min(eb, len(out_acks2_dups_bursts))
                            sb = sburst
                            if sb <= 0 :
                                sb = 1
                            # Catch case where sburst set greater than eburst
                            if sb > eb :
                                sb = eb

                            out_files[long_rev_name] = out_acks2_dups_bursts[sb-1:eb]
                            for tmp_f in out_acks2_dups_bursts[sb-1:eb] :
                                out_groups[tmp_f] = group

        # if desired compute aggregate acked bytes for each experiment
        # XXX only do this for burst_sep=0 now
        if burst_sep == 0.0 and total_per_experiment == '1':

            aggregated = {}

            # first read everything in one dictionary indexed by time
            flow = 0
            for name in out_files:
                if out_groups[out_files[name]] == group:
                    with open(out_files[name], 'r') as f:
                        lines = f.readlines()
                        for line in lines:
                            fields = line.split()
                            curr_time = float(fields[0])
                            if curr_time not in aggregated:
                                aggregated[curr_time] = []
                            aggregated[curr_time].append((flow, int(fields[1]), int(fields[2])))
                    flow += 1

            total = {}         # total cumulative values
            last_flow_val = {} # last values per flow (ackbyte, dupack) tuples
            last_val = (0, 0)  # value from last time

            # second go through by time and total
            for t in sorted(aggregated.keys()):
                # if there is no entry for time t, then create one
                if t not in total:
                    total[t] = last_val # start with the last value (cumulative total)

                # get delta values for ackbytes and dupacks for each value and add
                for (flow, cum_byte, cum_ack) in aggregated[t]:
                    #print(t, flow, cum_byte, cum_ack)
                    if flow in last_flow_val:
                        byte = cum_byte - last_flow_val[flow][0]
                        ack = cum_ack - last_flow_val[flow][1]
                    else:
                        byte = cum_byte
                        ack = cum_ack

                    #if flow in last_flow_val:
                    #    print(cum_byte, last_flow_val[flow][0], byte)

                    # add delta values to value at current time t
                    total[t] = (total[t][0] + byte, total[t][1] + ack)
                    # memorise last value
                    last_flow_val[flow] = (cum_byte, cum_ack)

                last_val = total[t]

            # write output file
            out_acks1 = out_dirname + test_id + '_total' + ofile_ext
            with open(out_acks1, 'w') as f:
                for t in sorted(total.keys()):
                    f.write('%f %i %i\n' % (t, total[t][0], total[t][1]))

            # replace all files for separate flows with total
            delete_list = []
            for name in out_files:
                if out_groups[out_files[name]] == group:
                    delete_list.append(name)

            #print(delete_list)
            #print(out_files)
            #print(out_groups)
            for d in delete_list:
                try:
                    del out_groups[out_files[d]]
                except KeyError:
                    # forward and backward name match to same data file
                    # XXX investigate
                    pass
                del out_files[d]

            name = test_id
            out_files[name] = out_acks1
            out_groups[out_acks1] = group

        group += 1

    return (test_id_arr, out_files, out_groups)
## Extract cumulative bytes ACKnowledged and cumulative dupACKs
## SEE _extract_ackseq
@task
def extract_ackseq(test_id='', out_dir='', replot_only='0', source_filter='',
                   ts_correct='1', burst_sep='0.0',
                   sburst='1', eburst='0', total_per_experiment='0'):
    "Extract cumulative bytes ACKnowledged vs time / extract incast bursts"

    # Thin task wrapper: delegate all work to the internal extraction
    # routine and discard its return values (we only want the files)
    _extract_ackseq(test_id=test_id, out_dir=out_dir, replot_only=replot_only,
                    source_filter=source_filter, ts_correct=ts_correct,
                    burst_sep=burst_sep, sburst=sburst, eburst=eburst,
                    total_per_experiment=total_per_experiment)

    # done
    puts('\n[MAIN] COMPLETED extracting ackseq %s \n' % test_id)
## Plot cumulative bytes ACKnowledged or cumulative dupACKs vs time
# @param test_id Semicolon-separated list of test ID prefixes of experiments to analyse
# @param out_dir Output directory for results
# @param replot_only '1' don't extract raw ACK vs time data per test_ID if already done,
# but still re-calculate dupACKs and bursts (if any) before plotting results
# @param source_filter Filter on specific flows to process
# @param min_values      Ignore flows with equal or fewer output values / packets
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names per flow
# (each name will have burst numbers appended if burst_sep is set)
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds (by default 0.0 = end of experiment)
# @param out_name Prefix for filenames of resulting pdf files
# @param pdf_dir Output directory for pdf files (graphs), if not specified it is
# the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param burst_sep '0' plot seq numbers as they come, relative to 1st seq number
# > '0' plot seq numbers relative to 1st seq number after gaps
# of more than burst_sep milliseconds (e.g. incast query/response bursts)
# < 0, plot seq numbers relative to 1st seq number after each abs(burst_sep)
# seconds since the first burst @ t = 0 (e.g. incast query/response bursts)
# @param sburst Start plotting with burst N (bursts are numbered from 1)
# @param eburst End plotting with burst N (bursts are numbered from 1)
# @param dupacks '0' to plot ACKed bytes vs time
# '1' to plot cumulative dupACKs vs time
# @param plot_params Parameters passed to plot function via environment variables
# @param plot_script Specify the script used for plotting, must specify full path
#
# Intermediate files end in ".acks", ".acks.N", ".acks.tscorr" or ".acks.tscorr.N"
# Output pdf files end in:
# "_ackseqno_time_series.pdf",
# "_ackseqno_bursts_time_series.pdf",
# "_comparison_ackseqno_time_series.pdf"
# "_comparison_ackseqno_bursts_time_series.pdf"
# (if dupacks=1, then as above with "dupacks" instead of "ackseqno")
@task
def analyse_ackseq(test_id='', out_dir='', replot_only='0', source_filter='',
                   min_values='3', omit_const='0', ymin='0', ymax='0', lnames='',
                   stime='0.0', etime='0.0', out_name='',
                   pdf_dir='', ts_correct='1', burst_sep='0.0',
                   sburst='1', eburst='0', dupacks='0',
                   plot_params='', plot_script=''):
    "Plot cumulative bytes ACKnowledged vs time / extract incast bursts"

    # extract (or reuse) the ACK sequence data for all flows
    extracted = _extract_ackseq(test_id, out_dir, replot_only, source_filter,
                                ts_correct, burst_sep, sburst, eburst)
    test_id_arr, out_files, out_groups = extracted

    # drop flows with too few data points and build the output name prefix
    (out_files, out_groups) = filter_min_values(out_files, out_groups, min_values)
    out_name = get_out_name(test_id_arr, out_name)

    # Select y-axis label, data column, scale factor and file suffix based
    # on whether the user wants ACKed bytes or dupACK counts plotted
    if dupacks == '0':
        y_label = 'Bytes acknowledged (Kbytes)'
        y_column = 2
        y_scaler = 1.0 / 1024.0
        oname = '_ackseqno'
    else:
        y_label = 'Cumulative dupACKs'
        y_column = 3
        y_scaler = 1.0
        oname = '_dupacks'

    # Keyword arguments shared by both plot functions
    # (NOTE: aggregation is turned off with aggr='')
    shared_kwargs = dict(pdf_dir=pdf_dir, aggr='',
                         omit_const=omit_const, ymin=float(ymin), ymax=float(ymax),
                         lnames=lnames, stime=stime, etime=etime, groups=out_groups,
                         plot_params=plot_params, plot_script=plot_script,
                         source_filter=source_filter)

    if float(burst_sep) == 0.0:
        # Regular plots, each trial has one file containing data
        plot_time_series(out_name, out_files, y_label, y_column, y_scaler,
                         'pdf', out_name + oname, **shared_kwargs)
    else:
        # Each trial has multiple files containing data from separate ACK
        # bursts detected within the trial
        plot_incast_ACK_series(out_name, out_files, y_label, y_column, y_scaler,
                               'pdf', out_name + oname, burst_sep=burst_sep,
                               sburst=int(sburst), **shared_kwargs)

    # done
    puts('\n[MAIN] COMPLETED plotting ackseq %s \n' % out_name)
## Plot goodput based on extracted ACKseq data
# @param test_id Semicolon-separated list of test ID prefixes of experiments to analyse
# @param out_dir Output directory for results
# @param replot_only '1' don't extract raw ACK vs time data per test_ID if already done,
# but still re-calculate dupACKs and bursts (if any) before plotting results
# @param source_filter Filter on specific flows to process
# @param min_values      Ignore flows with equal or fewer output values / packets
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names per flow
# (each name will have burst numbers appended if burst_sep is set)
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds (by default 0.0 = end of experiment)
# @param out_name Prefix for filenames of resulting pdf files
# @param pdf_dir Output directory for pdf files (graphs), if not specified it is
# the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param plot_params Parameters passed to plot function via environment variables
# @param plot_script Specify the script used for plotting, must specify full path
# @param total_per_experiment '0' plot per-flow goodput (default)
# '1' plot total goodput
@task
def analyse_goodput(test_id='', out_dir='', replot_only='0', source_filter='',
                    min_values='3', omit_const='0', ymin='0', ymax='0', lnames='',
                    stime='0.0', etime='0.0', out_name='',
                    pdf_dir='', ts_correct='1',
                    plot_params='', plot_script='', total_per_experiment='0'):
    "Plot goodput vs time"

    # Extract cumulative ACKed-bytes data without burst splitting
    # (burst_sep=0, sburst=0, eburst=0)
    (test_id_arr,
     out_files,
     out_groups) = _extract_ackseq(test_id, out_dir, replot_only, source_filter,
                                   ts_correct, 0, 0, 0, total_per_experiment)
    (out_files, out_groups) = filter_min_values(out_files, out_groups, min_values)
    out_name = get_out_name(test_id_arr, out_name)

    yaxistitle = 'Goodput [kbps]'
    ycolumn = 2
    yaxisscale = 0.008
    oname = '_goodput'

    # ackseq always delivers cumulative values, instruct plot code to use the
    # differences.
    # Fix: separate DIFF=1 from any user-supplied plot_params with a space;
    # previously plot_params='X=1' produced the fused token 'X=1DIFF=1'.
    if plot_params != '':
        plot_params += ' '
    plot_params += 'DIFF=1'

    if total_per_experiment == '0':
        sort_flowkey='1'
    else:
        sort_flowkey='0'

    # Regular plots, each trial has one file containing data
    plot_time_series(out_name, out_files, yaxistitle, ycolumn, yaxisscale, 'pdf',
                     out_name + oname, pdf_dir=pdf_dir, aggr='1',
                     omit_const=omit_const, ymin=float(ymin), ymax=float(ymax),
                     lnames=lnames, stime=stime, etime=etime, groups=out_groups,
                     sort_flowkey=sort_flowkey,
                     plot_params=plot_params, plot_script=plot_script,
                     source_filter=source_filter)

    # done (fix: message used to say 'ackseq' — this task plots goodput)
    puts('\n[MAIN] COMPLETED plotting goodput %s \n' % out_name)
## Generate a 2d density plot with one paramter on x, one one y and the third
## one expressed as different colours of the "blobs"
# @param exp_list List of all test IDs (allows to filter out certain experiments,
#                       i.e. specific value combinations)
# @param res_dir Directory with result files from analyse_all
# @param out_dir Output directory for result files
# @param source_filter Filter on specific sources. typically one source. if multiple sources
# are specified they are all aggregated. unlike analyse_cmpexp here we
# can't have per-source categories.
# @param min_values Ignore flows with less output values / packets
# @param xmetric Can be 'throughput', 'spprtt' (spp rtt), 'tcprtt' (unsmoothed tcp rtt), 'cwnd',
# 'tcpstat', with 'tcpstat' must specify siftr_index or web10g_index
# @param ymetric: Can be 'throughput', 'spprtt' (spp rtt), 'tcprtt' (unsmoothed tcp rtt), 'cwnd',
# 'tcpstat', with 'tcpstat' must specify siftr_index or web10g_index
# @param variables Semicolon-separated list of <var>=<value> where <value> means
# we only want experiments where <var> had the specific value
# @param out_name File name prefix
# @param xmin Minimum value on x-axis
# @param xmax Maximum value on x-axis
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param group_by Semicolon-separated list of experiment variables defining the different categories
# the variables are the variable names used in the file names
# @param pdf_dir Output directory for pdf files (graphs), if not specified it
# is the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param smoothed '0' plot non-smooth RTT (enhanced RTT in case of FreeBSD),
# '1' plot smoothed RTT estimates (non enhanced RTT in case of FreeBSD)
# @param link_len '0' throughput based on IP length (default),
# '1' throughput based on link-layer length
# @param replot_only '0' extract data
# '1' don't extract data again, just redo the plot
# @param plot_params Parameters passed to plot function via environment variables
# @param plot_script Specify the script used for plotting, must specify full path
# (default is config.TPCONF_script_path/plot_contour.R)
# @param xstat_index Integer number of the column in siftr/web10g log files (for xmetric)
# @param ystat_index Integer number of the column in siftr/web10g log files (for ymetric)
# @param dupacks '0' to plot ACKed bytes vs time
# '1' to plot dupACKs vs time
# @param cum_ackseq '0' average per time window data
# '1' cumulative counter data
# @param merge_data '0' by default don't merge data
# '1' merge data for each experiment
# @param sburst Start plotting with burst N (bursts are numbered from 1)
# @param eburst End plotting with burst N (bursts are numbered from 1)
# @param test_id_prefix Prefix used for the experiments (used to get variables
# names from the file names
# @param slowest_only '0' plot all response times (metric restime)
# '1' plot only the slowest response times for each burst
# @param query_host Name of querier (only for iqtime metric)
# NOTE: that xmin, xmax, ymin and ymax don't just zoom, but govern the selection of data points
# used for the density estimation. this is how ggplot2 works by default, although possibly
# can be changed
@task
def analyse_2d_density(exp_list='experiments_completed.txt', res_dir='', out_dir='',
                       source_filter='', min_values='3', xmetric='throughput',
                       ymetric='tcprtt', variables='', out_name='', xmin='0', xmax='0',
                       ymin='0', ymax='0', lnames='', group_by='aqm', replot_only='0',
                       pdf_dir='', ts_correct='1', smoothed='1', link_len='0',
                       plot_params='', plot_script='', xstat_index='', ystat_index='',
                       dupacks='0', cum_ackseq='1', merge_data='0',
                       sburst='1', eburst='0', test_id_prefix='[0-9]{8}\-[0-9]{6}_experiment_',
                       slowest_only='0', query_host=''):
    "Bubble plot for different experiments"

    test_id_pfx = ''

    # validate both metric names before doing any work
    check = get_metric_params(xmetric, smoothed, ts_correct)
    if check is None:
        abort('Unknown metric %s specified with xmetric' % xmetric)
    check = get_metric_params(ymetric, smoothed, ts_correct)
    if check is None:
        abort('Unknown metric %s specified with ymetric' % ymetric)

    #if source_filter == '':
    #    abort('Must specify at least one source filter')

    if len(source_filter.split(';')) > 12:
        abort('Cannot have more than 12 filters')

    # XXX more param checking

    # make sure res_dir has valid form (out_dir is handled by extract methods)
    res_dir = valid_dir(res_dir)

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    # read test ids
    experiments = read_experiment_ids(exp_list)

    # get path based on first experiment id
    dir_name = get_first_experiment_path(experiments)

    # if we haven't got the extracted data run extract method(s) first
    if res_dir == '':
        for experiment in experiments:

            (ex_function, kwargs) = get_extract_function(xmetric, link_len,
                                                         xstat_index, sburst=sburst, eburst=eburst,
                                                         slowest_only=slowest_only, query_host=query_host)
            (dummy, out_files, out_groups) = ex_function(
                test_id=experiment, out_dir=out_dir,
                source_filter=source_filter,
                replot_only=replot_only,
                ts_correct=ts_correct,
                **kwargs)

            (ex_function, kwargs) = get_extract_function(ymetric, link_len,
                                                         ystat_index, sburst=sburst, eburst=eburst,
                                                         slowest_only=slowest_only, query_host=query_host)
            (dummy, out_files, out_groups) = ex_function(
                test_id=experiment, out_dir=out_dir,
                source_filter=source_filter,
                replot_only=replot_only,
                ts_correct=ts_correct,
                **kwargs)

        if out_dir == '' or out_dir[0] != '/':
            res_dir = dir_name + '/' + out_dir
        else:
            res_dir = out_dir
    else:
        if res_dir[0] != '/':
            res_dir = dir_name + '/' + res_dir

    # make sure we have trailing slash
    res_dir = valid_dir(res_dir)

    if pdf_dir == '':
        pdf_dir = res_dir
    else:
        if pdf_dir[0] != '/':
            pdf_dir = dir_name + '/' + pdf_dir
        pdf_dir = valid_dir(pdf_dir)
        # if pdf_dir specified create if it doesn't exist
        mkdir_p(pdf_dir)

    #
    # build match string from variables
    #

    (match_str, match_str2) = build_match_strings(experiments[0], variables,
                                                  test_id_prefix)

    #
    # filter out the experiments to plot, generate x-axis labels, get test id prefix
    #

    (fil_experiments,
     test_id_pfx,
     dummy) = filter_experiments(experiments, match_str, match_str2)

    #
    # get groups based on group_by variable
    #

    group_idx = 1
    levels = {}
    groups = []
    leg_names = []
    _experiments = []
    for experiment in fil_experiments:
        level = ''
        add_exp = True
        for g in group_by.split(';'):
            p = experiment.find(g)
            if p > -1:
                s = experiment.find('_', p)
                s += 1
                e = experiment.find('_', s)
                level += g + ':' + experiment[s:e] + ' '
            else:
                add_exp = False
                break

        # remove the final space from the string
        level = level[:-1]

        if add_exp == True:
            _experiments.append(experiment)

            #print('level: ' + level)

            if level not in levels:
                levels[level] = group_idx
                group_idx += 1
                leg_names.append(level)

            if merge_data == '1':
                groups.append(levels[level])
            else:
                for i in range(len(source_filter.split(';'))):
                    groups.append(levels[level])

    fil_experiments = _experiments

    #
    # get metric parameters and list of data files
    #

    # get the metric parameter for both x and y
    x_axis_params = get_metric_params(xmetric, smoothed, ts_correct, xstat_index,
                                      dupacks, cum_ackseq, slowest_only)
    y_axis_params = get_metric_params(ymetric, smoothed, ts_correct, ystat_index,
                                      dupacks, cum_ackseq, slowest_only)

    x_ext = x_axis_params[0]
    y_ext = y_axis_params[0]

    # if we merge responders make sure we only use the merged files
    if merge_data == '1':
        # reset source filter so we match the merged file
        sfil.clear()
        sfil = SourceFilter('S_0.0.0.0_0')

    x_files = []
    y_files = []
    for experiment in fil_experiments:
        _x_files = []
        _y_files = []
        _x_ext = x_ext
        _y_ext = y_ext

        _files = get_testid_file_list('', experiment, _x_ext,
                                      'LC_ALL=C sort', res_dir)
        if merge_data == '1':
            _x_ext += '.all'
            _files = merge_data_files(_files)
        _x_files += _files

        _files = get_testid_file_list('', experiment, _y_ext,
                                      'LC_ALL=C sort', res_dir)
        if merge_data == '1':
            _y_ext += '.all'
            _files = merge_data_files(_files)
        _y_files += _files

        match_str = '.*_([0-9\.]*_[0-9]*_[0-9\.]*_[0-9]*)[0-9a-z_.]*' + _x_ext
        for f in _x_files:
            #print(f)
            res = re.search(match_str, f)
            #print(res.group(1))
            if res and sfil.is_in(res.group(1)):
                # only add file if enough data points
                rows = int(
                    local('wc -l %s | awk \'{ print $1 }\'' %
                          f, capture=True))
                if rows > int(min_values):
                    x_files.append(f)

        match_str = '.*_([0-9\.]*_[0-9]*_[0-9\.]*_[0-9]*)[0-9a-z_.]*' + _y_ext
        for f in _y_files:
            # print(f)
            res = re.search(match_str, f)
            if res and sfil.is_in(res.group(1)):
                # only add file if enough data points
                rows = int(
                    local('wc -l %s | awk \'{ print $1 }\'' %
                          f, capture=True))
                if rows > int(min_values):
                    y_files.append(f)

    yindexes = [str(x_axis_params[2]), str(y_axis_params[2])]
    yscalers = [str(x_axis_params[3]), str(y_axis_params[3])]
    aggr_flags = [x_axis_params[5], y_axis_params[5]]
    diff_flags = [x_axis_params[6], y_axis_params[6]]

    if lnames != '':
        lnames_arr = lnames.split(';')
        if len(lnames_arr) != len(leg_names):
            # (fix: corrected 'qual' -> 'equal' typo in message)
            abort(
                'Number of legend names must be equal to the number of source filters')
        leg_names = lnames_arr

    print(x_files)
    print(y_files)
    print(groups)
    print(leg_names)

    #
    # pass the data files and auxilary info to plot function
    #

    if out_name != '':
        oprefix = out_name + '_' + test_id_pfx + '_' + xmetric + '_' + ymetric
    else:
        oprefix = test_id_pfx + '_' + xmetric + '_' + ymetric
    title = oprefix

    if plot_script == '':
        plot_script = 'R CMD BATCH --vanilla %s/plot_contour.R' % config.TPCONF_script_path

    #local('which R')
    # Fix: removed the stray comma that followed YFNAMES="%s" in this command
    # string; the shell included it in the YFNAMES value (compare XFNAMES)
    local('TITLE="%s" XFNAMES="%s" YFNAMES="%s" LNAMES="%s" XLAB="%s" YLAB="%s" YINDEXES="%s" '
          'YSCALERS="%s" XSEP="%s" YSEP="%s" OTYPE="%s" OPREFIX="%s" ODIR="%s" AGGRS="%s" '
          'DIFFS="%s" XMIN="%s" XMAX="%s" YMIN="%s" YMAX="%s" GROUPS="%s" %s '
          '%s %s%s_plot_contour.Rout' %
          (title, ','.join(x_files), ','.join(y_files), ','.join(leg_names),
           x_axis_params[1], y_axis_params[1], ','.join(yindexes), ','.join(yscalers),
           x_axis_params[4], y_axis_params[4], 'pdf', oprefix, pdf_dir, ','.join(aggr_flags),
           ','.join(diff_flags), xmin, xmax, ymin, ymax, ','.join([str(x) for x in groups]),
           plot_params, plot_script, pdf_dir, oprefix))

    if config.TPCONF_debug_level == 0:
        local('rm -f %s%s_plot_contour.Rout' % (pdf_dir, oprefix))

    # done
    puts('\n[MAIN] COMPLETED analyse_2d_density %s \n' % test_id_pfx)
## Extract inter-query times for each query burst
# @param test_id Semicolon-separated list of test ID prefixes of experiments to analyse
# @param out_dir Output directory for results
# @param replot_only '1' don't extract raw ACK vs time data per test_ID if already done,
# but still re-calculate dupACKs and bursts (if any) before plotting results
# '0' always extract raw data
# @param source_filter Filter on specific flows to process
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param query_host Name of the host that sent the queries
# @param by_responder '1' plot times for each responder separately
# Limitation: if by_responder=1, then this function only supports one test id
# '0' times for all responders
# @param cumulative     '0' raw inter-query time for each burst
#                       '1' accumulated inter-query time over all bursts
# @param burst_sep      Time between bursts in seconds (default 1.0), must be > 0
# @return Experiment ID list, map of flow names and file names, map of file names to group IDs
#
# Intermediate files end in ".iqtime.all", ".iqtime.<responder>", ".iqtime.<responder>.tscorr"
# The files contain the following columns:
# 1. Timestamp
# 2. IP of responder
# 3. port number of responder
# 4. inter-query time, time between request and first request in burst
# 5. inter-query time, time between request and previous request
# Note 4,5 can be cumulative or non-cumulative
def _extract_incast_iqtimes(test_id='', out_dir='', replot_only='0', source_filter='',
                            ts_correct='1', query_host='', by_responder='1', cumulative='0',
                            burst_sep='1.0'):
    """Extract incast inter-query times for each query burst.

    Parses the tcpdump file captured at the query host, extracts the GET
    request timestamps and computes, for each request, the time since the
    first request in the burst and the time since the previous request.

    :param test_id: Semicolon-separated list of test ID prefixes of experiments
    :param out_dir: Output directory for results
    :param replot_only: '1' reuse already extracted raw data files, '0' always extract
    :param source_filter: Filter on specific flows to process
    :param ts_correct: '1' correct timestamps based on estimated clock offsets
    :param query_host: Name of the host that sent the queries (mandatory)
    :param by_responder: '1' one output file per responder, '0' one file for all
    :param cumulative: '0' raw inter-query time per burst,
                       '1' accumulated inter-query time over all bursts
    :param burst_sep: Gap in seconds that separates two bursts, must be > 0
    :return: (experiment ID list, map of flow names to file names,
              map of file names to group IDs)
    """

    ifile_ext = '.dmp.gz'
    ofile_ext = '.iqtimes'  # inter-query times

    already_done = {}
    out_files = {}
    out_groups = {}

    burst_sep = float(burst_sep)

    if query_host == '':
        abort('Must specify query_host parameter')

    test_id_arr = test_id.split(';')
    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    group = 1

    for test_id in test_id_arr:

        # first process tcpdump files (ignore router and ctl interface tcpdumps)
        tcpdump_files = get_testid_file_list('', test_id,
                                             ifile_ext,
                                             'grep -v "router.dmp.gz" | grep -v "ctl.dmp.gz"')

        for tcpdump_file in tcpdump_files:
            # get input directory name and create result directory if necessary
            out_dirname = get_out_dir(tcpdump_file, out_dir)

            if tcpdump_file.find(query_host) == -1:
                # ignore all dump files not taken at query host
                continue

            # tcpdump filter: only packets with push flag set
            # (eliminates SYN, FIN, or ACKs without data)
            filter1 = 'tcp[tcpflags] & tcp-push != 0'

            (dummy, query_host_internal) = get_address_pair_analysis(test_id, query_host, do_abort='0')
            flow_name = query_host_internal + '_0_0.0.0.0_0'
            name = test_id + '_' + flow_name
            out1 = out_dirname + name + ofile_ext

            if name not in already_done:
                if replot_only == '0' or not (os.path.isfile(out1)):
                    # Use "-A" option to tcpdump so we get the payload bytes and
                    # can check for GET
                    # XXX this command fails if the default snap length is changed
                    # because of the magic grep -B value
                    local(
                        'zcat %s | tcpdump -A -tt -nr - "%s" | grep -B 5 "GET" | egrep "IP" | '
                        'awk \'{ print $1 " " $5; }\' | sed \'s/\.\([0-9]*\):/ \\1/\' > %s' %
                        (tcpdump_file, filter1, out1))

                already_done[name] = 1

            if sfil.is_in(flow_name):
                if ts_correct == '1':
                    out1 = adjust_timestamps(test_id, out1, query_host, ' ', out_dir)

                if by_responder == '0':
                    # all responders in one output file
                    out_name = out1 + '.all'
                    if replot_only == '0' or not (os.path.isfile(out_name)):
                        last_time = 0.0
                        burst_start = 0.0
                        cum_time = 0.0

                        out_f = open(out_name, 'w')
                        with open(out1) as f:
                            lines = f.readlines()
                            for line in lines:
                                fields = line.split()
                                time = float(fields[0])
                                if burst_start == 0.0:
                                    burst_start = time
                                # a new burst starts when the gap to the previous
                                # request is at least burst_sep (not for the last line)
                                # BUG FIX: was "line != lines[:-1]", which compares a
                                # string with a list and is therefore always true
                                if line != lines[-1] and last_time != 0.0 and time - last_time >= burst_sep:
                                    cum_time += (last_time - burst_start)
                                    burst_start = time
                                    last_req_time = time
                                else:
                                    last_req_time = last_time
                                if last_req_time == 0.0:
                                    last_req_time = time

                                if cumulative == '0':
                                    out_f.write('%s %f %f\n' % (' '.join(fields),
                                                                (time - burst_start),
                                                                (time - last_req_time)))
                                else:
                                    out_f.write('%s %f %f\n' % (' '.join(fields),
                                                                cum_time + (time - burst_start),
                                                                cum_time + (time - last_req_time)))

                                last_time = float(time)

                        out_f.close()

                    out_files[name] = out_name
                    out_groups[out_name] = group
                else:
                    # split inter-query times into multiple files by responder
                    # XXX ignore replot_only here, cause too difficult to check
                    last_time = 0.0
                    burst_start = 0.0
                    responders = {}
                    cum_time = {}
                    with open(out1) as f:
                        lines = f.readlines()
                        for line in lines:
                            fields = line.split()
                            time = float(fields[0])
                            # responder identified by ip.port
                            responder = fields[1] + '.' + fields[2]
                            if responder not in responders:
                                out_name = out1 + '.' + responder
                                responders[responder] = open(out_name, 'w')
                                out_files[responder] = out_name
                                cum_time[responder] = 0
                            out_f = responders[responder]

                            if burst_start == 0.0:
                                burst_start = time
                            # BUG FIX: was "line != lines[:-1]" (always true, see above)
                            if line != lines[-1] and last_time != 0.0 and time - last_time >= burst_sep:
                                #cum_time[responder] += (last_time - burst_start)
                                burst_start = time
                                last_req_time = time
                            else:
                                last_req_time = last_time
                            if last_req_time == 0.0:
                                last_req_time = time

                            if cumulative == '0':
                                out_f.write('%s %f %f\n' % (' '.join(fields), (time - burst_start),
                                                            (time - last_req_time)))
                            else:
                                out_f.write('%s %f %f\n' % (' '.join(fields),
                                                            cum_time[responder] + (time - burst_start),
                                                            cum_time[responder] + (time - last_req_time)))

                            cum_time[responder] += time - burst_start
                            last_time = float(time)

                    for out_f in responders.values():
                        out_f.close()

                    # sort by responder name (ip+port) and set groups
                    for responder in sorted(responders.keys()):
                        out_name = out1 + '.' + responder
                        out_groups[out_name] = group
                        group += 1

        if by_responder == '0':
            group += 1
        else:
            group = 1

    return (test_id_arr, out_files, out_groups)
## Extract inter-query times for each query burst
## SEE _extract_incast_iqtimes()
@task
def extract_incast_iqtimes(test_id='', out_dir='', replot_only='0', source_filter='',
                           ts_correct='1', query_host='', by_responder='1', cumulative='0',
                           burst_sep='1.0'):
    "Extract incast inter-query times"

    # delegate all the work to the internal extraction function
    _extract_incast_iqtimes(test_id=test_id, out_dir=out_dir, replot_only=replot_only,
                            source_filter=source_filter, ts_correct=ts_correct,
                            query_host=query_host, by_responder=by_responder,
                            cumulative=cumulative, burst_sep=burst_sep)

    # done
    puts('\n[MAIN] COMPLETED extracting incast inter-query times %s \n' % test_id)
## Plot inter-query times
# @param test_id Semicolon-separated list of test ID prefixes of experiments to analyse
# @param out_dir Output directory for results
# @param replot_only '1' don't extract raw data per test_ID if already done,
# '0' always extract raw data
# @param source_filter Filter on specific flows to process
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param query_host Name of the host that sent the queries
# @param by_responder '1' plot times for each responder separately
# '0' times for all responders
# @param cumulative '0' raw inter-query time for each burst
# '1' accumulated inter-query time over all bursts
# @param burst_sep Time between burst (default 1.0), must be > 0
# @param min_values     Ignore flows with less than or equal this many output values / packets
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param out_name File name prefix for resulting pdf file
# @param diff_to_burst_start '0' print time differences between requests, i.e.
# the times are the differences between request and previous
# request
# '1' print time differences between requests and first requests in
# burst (default)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds (by default 0.0 = end of experiment)
# @param pdf_dir Output directory for pdf files (graphs), if not specified it
# is the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param plot_params Parameters passed to plot function via environment variables
# @param plot_script Specify the script used for plotting, must specify full path
# (default is config.TPCONF_script_path/plot_contour.R)
#
# Note setting cumulative=1 and diff_to_burst_start=0 does produce a graph, but the
# graph does not make any sense.
@task
def analyse_incast_iqtimes(test_id='', out_dir='', replot_only='0', source_filter='',
                           ts_correct='1', query_host='', by_responder='1', cumulative='0',
                           burst_sep='1.0', min_values='3', omit_const='0', ymin='0', ymax='0', lnames='',
                           stime='0.0', etime='0.0', out_name='', diff_to_burst_start='1',
                           pdf_dir='', plot_params='', plot_script=''):
    "Plot incast inter-query times"

    if query_host == '':
        abort('Must specify query_host parameter')

    # extract (or reuse) the raw inter-query time data
    (test_id_arr,
     out_files,
     out_groups) = _extract_incast_iqtimes(test_id, out_dir, replot_only, source_filter,
                                           ts_correct, query_host, by_responder,
                                           cumulative, burst_sep)

    # drop flows with too few data points
    (out_files, out_groups) = filter_min_values(out_files, out_groups, min_values)
    out_name = get_out_name(test_id_arr, out_name)

    # y-axis label and data column depend on the cumulative/diff settings
    ylabel = ('Inter-query time (ms)' if cumulative == '0'
              else 'Cumulative Inter-query time (ms)')
    ycolumn = 4 if diff_to_burst_start == '1' else 5

    # file-name suffix encodes the (by_responder, cumulative) combination
    suffix_map = {
        ('0', '0'): '_iqtimes',
        ('0', '1'): '_cum_iqtimes',
        ('1', '0'): '_iqtimes_responders',
    }
    out_name_add = suffix_map.get((by_responder, cumulative), '_cum_iqtimes_responders')

    plot_time_series(out_name, out_files, ylabel, ycolumn, 1000, 'pdf',
                     out_name + out_name_add, pdf_dir=pdf_dir, aggr='',
                     sort_flowkey='0', omit_const=omit_const, ymin=float(ymin), ymax=float(ymax),
                     lnames=lnames, stime=stime, etime=etime, groups=out_groups,
                     plot_params=plot_params, plot_script=plot_script,
                     source_filter=source_filter)

    # done
    puts('\n[MAIN] COMPLETED plotting incast inter-query times %s\n' % out_name)
## Extract response times for each responder for incast experiments from tcpdump data
# @param test_id Semicolon-separated list of test ID prefixes of experiments to analyse
# @param out_dir Output directory for results
# @param replot_only '1' don't extract raw data per test_ID if already done,
# '0' always extract raw data
# @param source_filter Filter on specific flows to process
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param query_host     Name of the host that sent the queries (as specified in config)
# @param slowest_only '0' plot response times for individual responders
# '1' plot slowest response time across all responders
# '2' plot time between first request and last response finished
#
# Intermediate files end in ".restimes", ".restimes.tscorr"
# The files contain the following columns:
# 1. Timestamp the GET was sent
# 2. Burst number
# 3. Querier IP.port
# 4. Responder IP.port
# 5. Response time [seconds]
def _extract_incast_restimes(test_id='', out_dir='', replot_only='0', source_filter='',
                             ts_correct='1', query_host='', slowest_only='0'):
    "Extract incast response times"

    ifile_ext = '.dmp.gz'
    ofile_ext = '.restimes'

    # NOTE(review): abort_extract is never used below — confirm whether it can go
    abort_extract = False

    already_done = {}
    out_files = {}
    out_groups = {}

    if query_host == '':
        abort('Must specify query_host parameter')

    test_id_arr = test_id.split(';')
    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    # group number assigned to all flows of one experiment
    group = 1

    for test_id in test_id_arr:

        # first process tcpdump files (ignore router and ctl interface tcpdumps)
        tcpdump_files = get_testid_file_list('', test_id,
                                             ifile_ext,
                                             'grep -v "router.dmp.gz" | grep -v "ctl.dmp.gz"')

        for tcpdump_file in tcpdump_files:
            # get input directory name and create result directory if necessary
            out_dirname = get_out_dir(tcpdump_file, out_dir)
            dir_name = os.path.dirname(tcpdump_file)

            if tcpdump_file.find(query_host) == -1:
                # ignore all dump files not taken at query host
                continue

            # unique flows (cached to avoid re-scanning the dump on later calls)
            flows = lookup_flow_cache(tcpdump_file)
            if flows == None:
                flows = _list(local('zcat %s | tcpdump -nr - "tcp" | '
                                    'awk \'{ if ( $2 == "IP" ) { print $3 " " $5 " tcp" } }\' | '
                                    'sed "s/://" | '
                                    'sed "s/\.\([0-9]*\) /,\\1 /g" | sed "s/ /,/g" | '
                                    'LC_ALL=C sort -u' %
                                    tcpdump_file, capture=True))

                append_flow_cache(tcpdump_file, flows)

            # since client sends first packet to server, client-to-server flows
            # will always be first
            for flow in flows:
                src, src_port, dst, dst_port, proto = flow.split(',')

                # get external and internal addresses
                src, src_internal = get_address_pair_analysis(test_id, src, do_abort='0')
                dst, dst_internal = get_address_pair_analysis(test_id, dst, do_abort='0')

                if src == '' or dst == '':
                    continue

                # ignore flows with querier as destination
                if dst == query_host:
                    continue

                # flow name
                name = src_internal + '_' + src_port + \
                    '_' + dst_internal + '_' + dst_port

                # test id plus flow name
                if len(test_id_arr) > 1:
                    long_name = test_id + '_' + name
                else:
                    long_name = name

                # the two dump files
                dump1 = dir_name + '/' + test_id + '_' + src + ifile_ext

                # tcpdump filters and output file names
                # 'tcp[tcpflags] & tcp-push != 0' rule to extract only packets with push flag set
                # (eliminate SYN, FIN, or ACKs without data)
                filter1 = 'host ' + dst_internal + ' && port ' + dst_port + \
                    ' && tcp[tcpflags] & tcp-push != 0'

                out1_tmp = out_dirname + test_id + '_' + name + ofile_ext + '.tmp'
                out1 = out_dirname + test_id + '_' + name + ofile_ext

                if long_name not in already_done:
                    if replot_only == '0' or not ( os.path.isfile(out1) ):
                        # Use "-A" option to tcpdump so we get the payload bytes
                        # XXX this falls apart if snap size is not the default because of the magic -B 10
                        local(
                            'zcat %s | tcpdump -A -tt -nr - "%s" | grep -B 10 "GET" | egrep "IP" | '
                            'awk \'{ print $1 " " $3 " " $5; }\' | sed \'s/://\' > %s' %
                            (dump1, filter1, out1_tmp))

                        # get the last line, assume this is last packet of last request
                        local('zcat %s | tcpdump -tt -nr - "%s" | tail -1 | '
                              'awk \'{ print $1 " " $3 " " $5; }\' | sed \'s/://\' >> %s' %
                              (dump1, filter1, out1_tmp))

                        # compute response times from each GET packet and corresponding final data packet;
                        # lines alternate between request (even cnt) and response (odd cnt)
                        out_f = open(out1, 'w')
                        with open(out1_tmp) as f:
                            lines = f.readlines()
                            cnt = 0
                            last_src = ''
                            for line in lines:
                                fields = line.split()
                                if cnt % 2 == 0:
                                    # request
                                    req_time = float(line.split()[0])
                                elif fields[1] != last_src:
                                    # response, unless the source is the same as for the last packet
                                    # (then we possibly have no response)
                                    res_time = float(fields[0]) - req_time
                                    out_f.write('%f %i %s %s %s\n' % (req_time, int(cnt/2) + 1, fields[2],
                                                                      fields[1], res_time))
                                    last_src = fields[1]

                                cnt += 1

                        out_f.close()
                        os.remove(out1_tmp)

                    already_done[long_name] = 1

                if sfil.is_in(name):
                    if ts_correct == '1':
                        out1 = adjust_timestamps(test_id, out1, dst, ' ', out_dir)

                    out_files[long_name] = out1
                    out_groups[out1] = group

        # check for consistency and abort if we see less response times for one responder
        max_cnt = 0
        for name in out_files:
            if out_groups[out_files[name]] == group:
                cnt = int(local('wc -l %s | awk \'{ print $1 }\'' %
                                out_files[name], capture=True))
                if max_cnt > 0 and cnt < max_cnt:
                    abort('Responder timed out in experiment %s' % test_id)
                if cnt > max_cnt:
                    max_cnt = cnt

        group += 1

    # optionally reduce to the slowest responder (or total time) per burst
    if slowest_only != '0':
        (out_files, out_groups) = get_slowest_response_time(out_files, out_groups,
                                                            int(slowest_only) - 1)

    return (test_id_arr, out_files, out_groups)
## Extract response times for each responder for incast experiments
## SEE _extract_incast_restimes()
@task
def extract_incast_restimes(test_id='', out_dir='', replot_only='0', source_filter='',
                            ts_correct='1', query_host=''):
    "Extract incast response times"

    # delegate all the work to the internal extraction function
    _extract_incast_restimes(test_id=test_id, out_dir=out_dir, replot_only=replot_only,
                             source_filter=source_filter, ts_correct=ts_correct,
                             query_host=query_host)

    # done
    puts('\n[MAIN] COMPLETED extracting incast response times %s \n' % test_id)
## Extract packet loss for flows using custom tool
## XXX tool uses packet hash based on UDP/TCP payload, so only works with traffic
## that has unique payload bytes
## The extracted files have an extension of .loss. The format is CSV with the
## columns:
## 1. Timestamp RTT measured (seconds.microseconds)
## 2. 0/1 (0=arrived, 1=lost)
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again that is already extracted
# @param source_filter Filter on specific sources
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @return Test ID list, map of flow names to interim data file names and
# map of file names and group IDs
def _extract_pktloss(test_id='', out_dir='', replot_only='0', source_filter='',
                     ts_correct='1'):
    """Extract packet loss for flows using the external pktloss.py tool.

    XXX the tool uses a packet hash based on UDP/TCP payload, so it only works
    with traffic that has unique payload bytes.

    The extracted files have an extension of .loss. The format is CSV with
    the columns:
    1. Timestamp (seconds.microseconds)
    2. 0/1 (0=arrived, 1=lost)

    :param test_id: Semicolon-separated list of test ID prefixes of experiments
    :param out_dir: Output directory for results
    :param replot_only: '1' don't extract data again that is already extracted
    :param source_filter: Filter on specific sources
    :param ts_correct: '1' correct timestamps based on estimated clock offsets
    :return: (test ID list, map of flow names to interim data file names,
              map of file names to group IDs)
    """

    ifile_ext = '.dmp.gz'
    ofile_ext = '.loss'

    already_done = {}
    out_files = {}
    out_groups = {}

    test_id_arr = test_id.split(';')
    if len(test_id_arr) == 0 or test_id_arr[0] == '':
        abort('Must specify test_id parameter')

    # Initialise source filter data structure
    sfil = SourceFilter(source_filter)

    #local('which pktloss.py')

    group = 1

    for test_id in test_id_arr:

        # first process tcpdump files (ignore router and ctl interface tcpdumps)
        tcpdump_files = get_testid_file_list('', test_id,
                                             ifile_ext,
                                             'grep -v "router.dmp.gz" | grep -v "ctl.dmp.gz"')

        for tcpdump_file in tcpdump_files:
            # get input directory name and create result directory if necessary
            out_dirname = get_out_dir(tcpdump_file, out_dir)
            dir_name = os.path.dirname(tcpdump_file)

            # get unique flows (cached to avoid re-scanning the dump)
            flows = lookup_flow_cache(tcpdump_file)
            if flows is None:  # idiom fix: was "flows == None"
                flows = _list(local('zcat %s | tcpdump -nr - "tcp" | '
                                    'awk \'{ if ( $2 == "IP" ) { print $3 " " $5 " tcp" } }\' | '
                                    'sed "s/://" | '
                                    'sed "s/\.\([0-9]*\) /,\\1 /g" | sed "s/ /,/g" | '
                                    'LC_ALL=C sort -u' %
                                    tcpdump_file, capture=True))
                flows += _list(local('zcat %s | tcpdump -nr - "udp" | '
                                     'awk \'{ if ( $2 == "IP" ) { print $3 " " $5 " udp" } }\' | '
                                     'sed "s/://" | '
                                     'sed "s/\.\([0-9]*\) /,\\1 /g" | sed "s/ /,/g" | '
                                     'LC_ALL=C sort -u' %
                                     tcpdump_file, capture=True))

                append_flow_cache(tcpdump_file, flows)

            # since client sends first packet to server, client-to-server flows
            # will always be first
            for flow in flows:
                src, src_port, dst, dst_port, proto = flow.split(',')

                # get external and internal addresses
                src, src_internal = get_address_pair_analysis(test_id, src, do_abort='0')
                dst, dst_internal = get_address_pair_analysis(test_id, dst, do_abort='0')

                if src == '' or dst == '':
                    continue

                # flow names for both directions
                name = src_internal + '_' + src_port + \
                    '_' + dst_internal + '_' + dst_port
                rev_name = dst_internal + '_' + dst_port + \
                    '_' + src_internal + '_' + src_port

                # test id plus flow name
                if len(test_id_arr) > 1:
                    long_name = test_id + '_' + name
                    long_rev_name = test_id + '_' + rev_name
                else:
                    long_name = name
                    long_rev_name = rev_name

                if long_name not in already_done and long_rev_name not in already_done:
                    # the two dump files
                    dump1 = dir_name + '/' + test_id + '_' + src + ifile_ext
                    dump2 = dir_name + '/' + test_id + '_' + dst + ifile_ext

                    # filters for pktloss.py
                    filter1 = src_internal + ':' + src_port + ':' + dst_internal + ':' + dst_port
                    filter2 = dst_internal + ':' + dst_port + ':' + src_internal + ':' + src_port

                    # output file names
                    out_loss = out_dirname + test_id + '_' + name + ofile_ext
                    rev_out_loss = out_dirname + test_id + '_' + rev_name + ofile_ext

                    if replot_only == '0' or not ( os.path.isfile(out_loss) and \
                                                   os.path.isfile(rev_out_loss) ):
                        # compute loss for both directions
                        local(
                            'pktloss.py -t %s -T %s -f %s > %s' %
                            (dump1, dump2, filter1, out_loss))
                        local(
                            'pktloss.py -t %s -T %s -f %s > %s' %
                            (dump2, dump1, filter2, rev_out_loss))

                    already_done[long_name] = 1
                    already_done[long_rev_name] = 1

                    if sfil.is_in(name):
                        if ts_correct == '1':
                            out_loss = adjust_timestamps(test_id, out_loss, src, ' ', out_dir)

                        out_files[long_name] = out_loss
                        out_groups[out_loss] = group

                    if sfil.is_in(rev_name):
                        if ts_correct == '1':
                            rev_out_loss = adjust_timestamps(test_id, rev_out_loss, dst, ' ',
                                                             out_dir)

                        out_files[long_rev_name] = rev_out_loss
                        out_groups[rev_out_loss] = group

        group += 1

    return (test_id_arr, out_files, out_groups)
## Extract packet loss for flows
## SEE _extract_pktloss()
@task
def extract_pktloss(test_id='', out_dir='', replot_only='0', source_filter='',
                    ts_correct='1'):
    "Extract packet loss of flows"

    # delegate all the work to the internal extraction function
    _extract_pktloss(test_id=test_id, out_dir=out_dir, replot_only=replot_only,
                     source_filter=source_filter, ts_correct=ts_correct)

    # done
    puts('\n[MAIN] COMPLETED extracting packet loss %s \n' % test_id)
## Plot packet loss rate for flows
# @param test_id Test ID prefix of experiment to analyse
# @param out_dir Output directory for results
# @param replot_only Don't extract data again, just redo the plot
# @param source_filter Filter on specific sources
# @param min_values Minimum number of data points in file, if fewer points
# the file is ignored
# @param omit_const '0' don't omit anything,
# '1' omit any series that are 100% constant
# (e.g. because there was no data flow)
# @param ymin Minimum value on y-axis
# @param ymax Maximum value on y-axis
# @param lnames Semicolon-separated list of legend names
# @param stime Start time of plot window in seconds
# (by default 0.0 = start of experiment)
# @param etime End time of plot window in seconds
# (by default 0.0 = end of experiment)
# @param out_name Name prefix for resulting pdf file
# @param pdf_dir Output directory for pdf files (graphs), if not specified it is
# the same as out_dir
# @param ts_correct '0' use timestamps as they are (default)
# '1' correct timestamps based on clock offsets estimated
# from broadcast pings
# @param plot_params Set env parameters for plotting
# @param plot_script Specify the script used for plotting, must specify full path
@task
def analyse_pktloss(test_id='', out_dir='', replot_only='0', source_filter='',
                    min_values='3', omit_const='0', ymin='0', ymax='0',
                    lnames='', stime='0.0', etime='0.0', out_name='', pdf_dir='',
                    ts_correct='1', plot_params='', plot_script=''):
    "Plot packet loss rate of flows"

    # extract (or reuse) the per-flow loss data
    test_id_arr, out_files, out_groups = _extract_pktloss(
        test_id, out_dir, replot_only, source_filter, ts_correct)

    # drop flows with too few data points
    out_files, out_groups = filter_min_values(out_files, out_groups, min_values)
    out_name = get_out_name(test_id_arr, out_name)

    plot_time_series(out_name, out_files, 'Packet loss (%)', 2, 1.0, 'pdf',
                     out_name + '_pktloss', pdf_dir=pdf_dir, omit_const=omit_const,
                     ymin=float(ymin), ymax=float(ymax), lnames=lnames, aggr='2',
                     stime=stime, etime=etime, groups=out_groups, plot_params=plot_params,
                     plot_script=plot_script, source_filter=source_filter)

    # done
    puts('\n[MAIN] COMPLETED plotting packet loss rate %s \n' % out_name)
| {
"content_hash": "dab440d70b2f6c932580a7190e0613cf",
"timestamp": "",
"source": "github",
"line_count": 5202,
"max_line_length": 148,
"avg_line_length": 43.85928489042676,
"alnum_prop": 0.5405161380809621,
"repo_name": "knneth/teacup",
"id": "6f70491af363e180c3800ce2839a6281a134d876",
"size": "229760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analyse.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "590745"
},
{
"name": "R",
"bytes": "87675"
},
{
"name": "Shell",
"bytes": "43296"
}
],
"symlink_target": ""
} |
import logging
import re
import argparse
import service_conf
from http.cookies import SimpleCookie
from urllib.parse import parse_qs
import sys
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_SOAP
from saml2 import time_util
from saml2 import ecp
from saml2 import BINDING_HTTP_ARTIFACT
from saml2 import BINDING_HTTP_POST
from saml2.client import Saml2Client
from saml2.ecp_client import PAOS_HEADER_INFO
from saml2.httputil import geturl, make_cookie, parse_cookie
from saml2.httputil import get_post
from saml2.httputil import Response
from saml2.httputil import BadRequest
from saml2.httputil import ServiceError
from saml2.httputil import SeeOther
from saml2.httputil import Unauthorized
from saml2.httputil import NotFound
from saml2.httputil import Redirect
from saml2.httputil import NotImplemented
from saml2.response import StatusError
from saml2.response import VerificationError
from saml2.s_utils import UnknownPrincipal
from saml2.s_utils import UnsupportedBinding
from saml2.s_utils import sid
from saml2.s_utils import rndbytes
#from srtest import exception_trace
# Configure the root logger to write to spx.log
logger = logging.getLogger("")
hdlr = logging.FileHandler('spx.log')
base_formatter = logging.Formatter(
    "%(asctime)s %(name)s:%(levelname)s %(message)s")
hdlr.setFormatter(base_formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)

# Module-level state, presumably populated during startup — TODO confirm where
SP = None
SEED = ""
POLICY = None
def dict_to_table(ava, lev=0, width=1):
    """Render a (possibly nested) attribute dictionary as an HTML table.

    :param ava: dictionary of attributes; values may be strings, lists, or
        nested dictionaries (lists are rendered with a row-spanning header)
    :param lev: recursion level (not used in the markup itself)
    :param width: table border width; decremented for nested tables
    :return: list of HTML text fragments (join to get the full markup)
    """
    txt = ['<table border=%s bordercolor="black">\n' % width]
    for prop, valarr in list(ava.items()):
        txt.append("<tr>\n")
        if isinstance(valarr, str):
            txt.append("<th>%s</th>\n" % str(prop))
            try:
                # NOTE(review): on Python 3 this renders the bytes repr
                # (e.g. b'value'); kept as-is for backward compatibility
                txt.append("<td>%s</td>\n" % valarr.encode("utf8"))
            except AttributeError:
                txt.append("<td>%s</td>\n" % valarr)
        elif isinstance(valarr, list):
            i = 0
            n = len(valarr)
            for val in valarr:
                if not i:
                    # first value carries the row-spanning header cell
                    txt.append("<th rowspan=%d>%s</td>\n" % (len(valarr), prop))
                else:
                    txt.append("<tr>\n")
                if isinstance(val, dict):
                    txt.append("<td>\n")
                    txt.extend(dict_to_table(val, lev + 1, width - 1))
                    txt.append("</td>\n")
                else:
                    try:
                        txt.append("<td>%s</td>\n" % val.encode())
                    except AttributeError:
                        # BUG FIX: previously this branch retried val.encode(),
                        # the very call that raised, so non-string values
                        # (e.g. ints) crashed; format the value directly instead
                        txt.append("<td>%s</td>\n" % val)
                if n > 1:
                    txt.append("</tr>\n")
                n -= 1
                i += 1
        elif isinstance(valarr, dict):
            txt.append("<th>%s</th>\n" % prop)
            txt.append("<td>\n")
            txt.extend(dict_to_table(valarr, lev + 1, width - 1))
            txt.append("</td>\n")
        txt.append("</tr>\n")
    txt.append('</table>\n')
    return txt
def handle_static(environ, start_response, path):
    """
    Creates a response for a static file. There might be a longer path
    than just /static/... if so strip the path leading up to static.

    :param environ: wsgi environment
    :param start_response: wsgi start response
    :param path: the static file and path to the file.
    :return: wsgi response for the static file.
    """
    # suffix -> content type; unknown suffixes fall back to the
    # Response default content type
    content_types = {
        ".ico": "image/x-icon",
        ".html": "text/html",
        ".txt": "text/plain",
        ".css": "text/css",
        ".js": "text/javascript",
        ".png": "image/png",
    }
    try:
        text = open(path).read()
        for suffix, ctype in content_types.items():
            if path.endswith(suffix):
                resp = Response(text, headers=[('Content-Type', ctype)])
                break
        else:
            resp = Response(text)
    except IOError:
        # file missing or unreadable -> 404
        resp = NotFound()
    return resp(environ, start_response)
class ECPResponse(object):
    """Minimal WSGI callable returning an ECP XML body with status 200 OK."""

    code = 200
    title = 'OK'

    def __init__(self, content):
        self.content = content

    #noinspection PyUnusedLocal
    def __call__(self, environ, start_response):
        status_line = "%s %s" % (self.code, self.title)
        start_response(status_line, [('Content-Type', "text/xml")])
        return [self.content]
def _expiration(timeout, tformat=None):
    """Return a cookie expiry string: 'now' or *timeout* minutes from now."""
    # e.g. Wed, 06-Jun-2012 01:34:34 GMT
    fmt = tformat or '%a, %d-%b-%Y %T GMT'
    if timeout == "now":
        return time_util.instant(fmt)
    # validity time should match lifetime of assertions
    return time_util.in_a_while(minutes=timeout, format=fmt)
class Cache(object):
    """In-memory SP session state keyed by a random cookie value."""

    def __init__(self):
        self.uid2user = {}
        self.cookie_name = "spauthn"
        self.outstanding_queries = {}
        self.relay_state = {}
        self.user = {}
        self.result = {}

    def kaka2user(self, kaka):
        """Return the cached user for cookie header *kaka*, or None."""
        logger.debug("KAKA: %s" % kaka)
        if not kaka:
            return None
        morsel = SimpleCookie(kaka).get(self.cookie_name, None)
        if morsel is None:
            logger.debug("No spauthn cookie")
            return None
        return self.uid2user.get(morsel.value)

    def delete_cookie(self, environ=None, kaka=None):
        """Return a header tuple that expires the session cookie, or None."""
        if not kaka:
            kaka = environ.get("HTTP_COOKIE", '')
        logger.debug("delete KAKA: %s" % kaka)
        if not kaka:
            return None
        _name = self.cookie_name
        morsel = SimpleCookie(kaka).get(_name, None)
        cookie = SimpleCookie()
        cookie[_name] = ""
        cookie[_name]['path'] = "/"
        logger.debug("Expire: %s" % morsel)
        cookie[_name]["expires"] = _expiration("dawn")
        return tuple(cookie.output().split(": ", 1))

    def user2kaka(self, user):
        """Cache *user* under a fresh random uid; return the Set-Cookie header tuple."""
        uid = rndbytes(32)
        self.uid2user[uid] = user
        cookie = SimpleCookie()
        cookie[self.cookie_name] = uid
        cookie[self.cookie_name]['path'] = "/"
        cookie[self.cookie_name]["expires"] = _expiration(480)
        logger.debug("Cookie expires: %s" % cookie[self.cookie_name]["expires"])
        return tuple(cookie.output().split(": ", 1))
# -----------------------------------------------------------------------------
# RECEIVERS
# -----------------------------------------------------------------------------
class Service(object):
    """Base class for the SP's WSGI endpoints.

    Unpacks SAML messages from the supported bindings and dispatches them
    to the do() method implemented by subclasses.
    """

    def __init__(self, environ, start_response, user=None):
        self.environ = environ
        logger.debug("ENVIRON: %s" % environ)
        self.start_response = start_response
        self.user = user
        # SAML client instance; set by subclasses
        self.sp = None

    def unpack_redirect(self):
        # Parse a HTTP-Redirect binding message from the query string,
        # keeping only the first value of each query parameter
        if "QUERY_STRING" in self.environ:
            _qs = self.environ["QUERY_STRING"]
            return dict([(k, v[0]) for k, v in list(parse_qs(_qs).items())])
        else:
            return None

    def unpack_post(self):
        # Parse a HTTP-POST binding message from the request body;
        # returns None if the body cannot be parsed
        _dict = parse_qs(get_post(self.environ))
        logger.debug("unpack_post:: %s" % _dict)
        try:
            return dict([(k, v[0]) for k, v in list(_dict.items())])
        except Exception:
            return None

    def unpack_soap(self):
        # A SOAP binding message: the whole request body is the SAML response
        try:
            query = get_post(self.environ)
            return {"SAMLResponse": query, "RelayState": ""}
        except Exception:
            return None

    def unpack_either(self):
        # Unpack based on the HTTP method (GET = redirect, POST = post binding)
        if self.environ["REQUEST_METHOD"] == "GET":
            _dict = self.unpack_redirect()
        elif self.environ["REQUEST_METHOD"] == "POST":
            _dict = self.unpack_post()
        else:
            _dict = None
        logger.debug("_dict: %s" % _dict)
        return _dict

    def operation(self, _dict, binding):
        # Dispatch an unpacked message to do().
        # NOTE(review): keys are looked up as bytes here (b"RelayState" etc.) —
        # confirm the unpack methods actually produce bytes keys.
        # Also returns None implicitly when neither SAMLResponse nor
        # SAMLRequest is present.
        logger.debug("_operation: %s" % _dict)
        if not _dict:
            resp = BadRequest('Error parsing request or no request')
            return resp(self.environ, self.start_response)
        else:
            try:
                _relay_state = _dict[b"RelayState"]
            except KeyError:
                _relay_state = ""
            if b"SAMLResponse" in _dict:
                return self.do(_dict[b"SAMLResponse"], binding,
                               _relay_state, mtype="response")
            elif b"SAMLRequest" in _dict:
                return self.do(_dict[b"SAMLRequest"], binding,
                               _relay_state, mtype="request")

    def artifact_operation(self, _dict):
        # Resolve a SAML artifact into the actual message, then dispatch
        # NOTE(review): b"SAMLart" is a bytes key but "RelayState" is str —
        # looks inconsistent with operation() above; confirm
        if not _dict:
            resp = BadRequest("Missing query")
            return resp(self.environ, self.start_response)
        else:
            # exchange artifact for response
            request = self.sp.artifact2message(_dict[b"SAMLart"], "spsso")
            return self.do(request, BINDING_HTTP_ARTIFACT, _dict["RelayState"])

    def response(self, binding, http_args):
        # Build the outgoing WSGI response for a prepared SAML message
        if binding == BINDING_HTTP_ARTIFACT:
            resp = Redirect()
        else:
            resp = Response(http_args["data"], headers=http_args["headers"])
        return resp(self.environ, self.start_response)

    def do(self, query, binding, relay_state="", mtype="response"):
        # To be implemented by subclasses
        pass

    def redirect(self):
        """ Expects a HTTP-redirect response """
        _dict = self.unpack_redirect()
        return self.operation(_dict, BINDING_HTTP_REDIRECT)

    def post(self):
        """ Expects a HTTP-POST response """
        _dict = self.unpack_post()
        return self.operation(_dict, BINDING_HTTP_POST)

    def artifact(self):
        # Can be either by HTTP_Redirect or HTTP_POST
        _dict = self.unpack_either()
        return self.artifact_operation(_dict)

    def soap(self):
        """
        Single log out using HTTP_SOAP binding
        """
        logger.debug("- SOAP -")
        _dict = self.unpack_soap()
        logger.debug("_dict: %s" % _dict)
        return self.operation(_dict, BINDING_SOAP)

    def uri(self):
        # Treated like a SOAP binding message
        _dict = self.unpack_either()
        return self.operation(_dict, BINDING_SOAP)

    def not_authn(self):
        # 401 response for unknown/unauthenticated users
        resp = Unauthorized('Unknown user')
        return resp(self.environ, self.start_response)
# -----------------------------------------------------------------------------
# Attribute Consuming service
# -----------------------------------------------------------------------------
class ACS(Service):
    """Attribute Consuming Service: parses AuthnRequest responses and
    renders the received attribute-value assertions."""
    def __init__(self, sp, environ, start_response, cache=None, **kwargs):
        Service.__init__(self, environ, start_response)
        self.sp = sp
        self.outstanding_queries = cache.outstanding_queries
        self.cache = cache
        # NOTE: this attribute shadows Service.response()
        self.response = None
        self.kwargs = kwargs
    def do(self, response, binding, relay_state="", mtype="response"):
        """
        :param response: The SAML response, transport encoded
        :param binding: Which binding the query came in over
        """
        if not response:
            logger.info("Missing Response")
            reply = Unauthorized('Unknown user')
            return reply(self.environ, self.start_response)
        try:
            self.response = self.sp.parse_authn_request_response(
                response, binding, self.outstanding_queries)
        except UnknownPrincipal as excp:
            logger.error("UnknownPrincipal: %s" % (excp,))
            reply = ServiceError("UnknownPrincipal: %s" % (excp,))
            return reply(self.environ, self.start_response)
        except UnsupportedBinding as excp:
            logger.error("UnsupportedBinding: %s" % (excp,))
            reply = ServiceError("UnsupportedBinding: %s" % (excp,))
            return reply(self.environ, self.start_response)
        except VerificationError as err:
            reply = ServiceError("Verification error: %s" % (err,))
            return reply(self.environ, self.start_response)
        except Exception as err:
            reply = ServiceError("Other error: %s" % (err,))
            return reply(self.environ, self.start_response)
        logger.info("AVA: %s" % self.response.ava)
        reply = Response(dict_to_table(self.response.ava))
        return reply(self.environ, self.start_response)
    def verify_attributes(self, ava):
        """Compare received attributes against the entity-category policy;
        returns {"less": [missing keys], "more": [unexpected keys]}."""
        logger.info("SP: %s" % self.sp.config.entityid)
        restriction = POLICY.get_entity_categories_restriction(
            self.sp.config.entityid, self.sp.metadata)
        lowered = [name.lower() for name in ava.keys()]
        result = {"less": [], "more": []}
        for name in restriction:
            if name not in ava and name not in lowered:
                result["less"].append(name)
        for name in ava:
            if name.lower() not in restriction:
                result["more"].append(name)
        return result
# -----------------------------------------------------------------------------
# REQUESTERS
# -----------------------------------------------------------------------------
class SSO(object):
    """Single sign-on driver: picks an IdP (directly, via cookie, WAYF,
    discovery service or ECP) and issues the AuthnRequest."""
    def __init__(self, sp, environ, start_response, cache=None,
                 wayf=None, discosrv=None, bindings=None):
        self.sp = sp
        self.environ = environ
        self.start_response = start_response
        self.cache = cache
        # query-string parameter that may name the IdP to use directly
        self.idp_query_param = "IdpQuery"
        self.wayf = wayf
        self.discosrv = discosrv
        if bindings:
            self.bindings = bindings
        else:
            self.bindings = [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST,
                             BINDING_HTTP_ARTIFACT]
        logger.debug("--- SSO ---")
    def response(self, binding, http_args, do_not_start_response=False):
        # Build the outgoing WSGI response for the chosen binding; for
        # HTTP-Redirect the Location header is pulled out of http_args.
        if binding == BINDING_HTTP_ARTIFACT:
            resp = Redirect()
        elif binding == BINDING_HTTP_REDIRECT:
            for param, value in http_args["headers"]:
                if param == "Location":
                    resp = SeeOther(str(value))
                    break
            else:
                resp = ServiceError("Parameter error")
        else:
            resp = Response(http_args["data"], headers=http_args["headers"])
        if do_not_start_response:
            return resp
        else:
            return resp(self.environ, self.start_response)
    def _wayf_redirect(self, came_from):
        sid_ = sid()
        # remember where the user was headed so the flow can resume later
        self.cache.outstanding_queries[sid_] = came_from
        logger.debug("Redirect to WAYF function: %s" % self.wayf)
        return -1, SeeOther(headers=[('Location', "%s?%s" % (self.wayf, sid_))])
    def _pick_idp(self, came_from):
        """
        If more than one idp and if none is selected, I have to do wayf or
        disco
        """
        _cli = self.sp
        logger.debug("[_pick_idp] %s" % self.environ)
        # ECP (PAOS-capable) client detection
        if "HTTP_PAOS" in self.environ:
            if self.environ["HTTP_PAOS"] == PAOS_HEADER_INFO:
                if 'application/vnd.paos+xml' in self.environ["HTTP_ACCEPT"]:
                    # Where should I redirect the user to
                    # entityid -> the IdP to use
                    # relay_state -> when back from authentication
                    logger.debug("- ECP client detected -")
                    _rstate = rndbytes()
                    self.cache.relay_state[_rstate] = geturl(self.environ)
                    _entityid = _cli.config.ecp_endpoint(
                        self.environ["REMOTE_ADDR"])
                    if not _entityid:
                        return -1, ServiceError("No IdP to talk to")
                    logger.debug("IdP to talk to: %s" % _entityid)
                    return ecp.ecp_auth_request(_cli, _entityid, _rstate)
                else:
                    return -1, ServiceError('Faulty Accept header')
            else:
                return -1, ServiceError('unknown ECP version')
        # Find all IdPs
        idps = self.sp.metadata.with_descriptor("idpsso")
        idp_entity_id = None
        # a previous discovery choice may be remembered in a cookie
        kaka = self.environ.get("HTTP_COOKIE", '')
        if kaka:
            try:
                (idp_entity_id, _) = parse_cookie("ve_disco", "SEED_SAW", kaka)
            except ValueError:
                pass
            except TypeError:
                pass
        # Any specific IdP specified in a query part
        query = self.environ.get("QUERY_STRING")
        if not idp_entity_id and query:
            try:
                _idp_entity_id = dict(parse_qs(query))[
                    self.idp_query_param][0]
                if _idp_entity_id in idps:
                    idp_entity_id = _idp_entity_id
            except KeyError:
                logger.debug("No IdP entity ID in query: %s" % query)
                pass
        if not idp_entity_id:
            # still undecided: fall back to WAYF, discovery service, or the
            # single configured IdP
            if self.wayf:
                if query:
                    try:
                        wayf_selected = dict(parse_qs(query))[
                            "wayf_selected"][0]
                    except KeyError:
                        return self._wayf_redirect(came_from)
                    idp_entity_id = wayf_selected
                else:
                    return self._wayf_redirect(came_from)
            elif self.discosrv:
                if query:
                    idp_entity_id = _cli.parse_discovery_service_response(
                        query=self.environ.get("QUERY_STRING"))
                if not idp_entity_id:
                    sid_ = sid()
                    self.cache.outstanding_queries[sid_] = came_from
                    logger.debug("Redirect to Discovery Service function")
                    eid = _cli.config.entityid
                    ret = _cli.config.getattr("endpoints",
                                              "sp")["discovery_response"][0][0]
                    ret += "?sid=%s" % sid_
                    loc = _cli.create_discovery_service_request(
                        self.discosrv, eid, **{"return": ret})
                    return -1, SeeOther(loc)
            elif len(idps) == 1:
                # idps is a dictionary
                idp_entity_id = list(idps.keys())[0]
            elif not len(idps):
                return -1, ServiceError('Misconfiguration')
            else:
                return -1, NotImplemented("No WAYF or DS present!")
        logger.info("Chosen IdP: '%s'" % idp_entity_id)
        return 0, idp_entity_id
    def redirect_to_auth(self, _cli, entity_id, came_from, vorg_name=""):
        # Create the AuthnRequest for the chosen IdP and wrap it in a
        # (not yet started) WSGI response for the picked binding.
        try:
            _binding, destination = _cli.pick_binding(
                "single_sign_on_service", self.bindings, "idpsso",
                entity_id=entity_id)
            logger.debug("binding: %s, destination: %s" % (_binding,
                                                           destination))
            req = _cli.create_authn_request(destination, vorg=vorg_name)
            _rstate = rndbytes()
            self.cache.relay_state[_rstate] = came_from
            ht_args = _cli.apply_binding(_binding, "%s" % req, destination,
                                         relay_state=_rstate)
            _sid = req.id
            logger.debug("ht_args: %s" % ht_args)
        except Exception as exc:
            logger.exception(exc)
            resp = ServiceError(
                "Failed to construct the AuthnRequest: %s" % exc)
            return resp(self.environ, self.start_response)
        # remember the request
        self.cache.outstanding_queries[_sid] = came_from
        return self.response(_binding, ht_args, do_not_start_response=True)
    def do(self):
        _cli = self.sp
        # Which page was accessed to get here
        came_from = geturl(self.environ)
        logger.debug("[sp.challenge] RelayState >> '%s'" % came_from)
        # Am I part of a virtual organization or more than one ?
        try:
            vorg_name = _cli.vorg._name
        except AttributeError:
            vorg_name = ""
        logger.debug("[sp.challenge] VO: %s" % vorg_name)
        # If more than one idp and if none is selected, I have to do wayf
        (done, response) = self._pick_idp(came_from)
        # Three cases: -1 something went wrong or Discovery service used
        #               0 I've got an IdP to send a request to
        #              >0 ECP in progress
        logger.debug("_idp_pick returned: %s" % done)
        if done == -1:
            return response(self.environ, self.start_response)
        elif done > 0:
            self.cache.outstanding_queries[done] = came_from
            return ECPResponse(response)
        else:
            entity_id = response
            # Do the AuthnRequest
            resp = self.redirect_to_auth(_cli, entity_id, came_from, vorg_name)
            return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
#noinspection PyUnusedLocal
def not_found(environ, start_response):
    """Fallback WSGI handler: called if no URL matches."""
    missing = NotFound('Not Found')
    return missing(environ, start_response)
# ----------------------------------------------------------------------------
#noinspection PyUnusedLocal
def main(environ, start_response, _sp):
    """Root endpoint: kick off the SSO flow for the wrapped SP."""
    return SSO(_sp, environ, start_response, cache=CACHE, **ARGS).do()
#noinspection PyUnusedLocal
def verify_login_cookie(environ, start_response, _sp):
    # NOTE(review): identical to main() - despite the name, no cookie is
    # inspected here; confirm whether cookie validation was intended.
    _sso = SSO(_sp, environ, start_response, cache=CACHE, **ARGS)
    return _sso.do()
def disco(environ, start_response, _sp):
    """Discovery-service response endpoint: resume SSO with the chosen IdP."""
    params = parse_qs(environ["QUERY_STRING"])
    entity_id = params["entityID"][0]
    _sid = params["sid"][0]
    came_from = CACHE.outstanding_queries[_sid]
    _sso = SSO(_sp, environ, start_response, cache=CACHE, **ARGS)
    resp = _sso.redirect_to_auth(_sso.sp, entity_id, came_from)
    # Remember the choice in a cookie so the user is not asked again
    resp.headers.append(make_cookie("ve_disco", entity_id, "SEED_SAW"))
    return resp(environ, start_response)
# ----------------------------------------------------------------------------
# map urls to functions
# Regex -> handler table consumed by application(); entries whose handler is
# a tuple are (ServiceClass, method_name, sp) specs (see add_urls()).
urls = [
    # Hmm, place holder, NOT used
    ('place', ("holder", None)),
    (r'^$', main),
    (r'^login', verify_login_cookie),
    (r'^disco', disco)
]
def add_urls():
    """Register the Attribute Consuming Service endpoints in `urls`.

    Adds the POST and redirect bindings, each with and without a trailing
    path component, all dispatching to the ACS class bound to SP.
    """
    base = "acs"
    for func_name in ("post", "redirect"):
        urls.append(("%s/%s$" % (base, func_name), (ACS, func_name, SP)))
        urls.append(("%s/%s/(.*)$" % (base, func_name), (ACS, func_name, SP)))
# ----------------------------------------------------------------------------
def application(environ, start_response):
    """
    The main WSGI application. Dispatch the current request to
    the functions from above.
    If nothing matches call the `not_found` function.

    :param environ: The HTTP application environment
    :param start_response: The application to run when the handling of the
        request is done
    :return: The response as a list of lines
    """
    path = environ.get('PATH_INFO', '').lstrip('/')
    logger.debug("<application> PATH: '%s'" % path)
    logger.debug("Finding callback to run")
    try:
        for regex, spec in urls:
            match = re.search(regex, path)
            if match is not None:
                if isinstance(spec, tuple):
                    # (ServiceClass, method_name, sp) entries from add_urls()
                    callback, func_name, _sp = spec
                    cls = callback(_sp, environ, start_response, cache=CACHE)
                    func = getattr(cls, func_name)
                    return func()
                else:
                    # plain callables registered directly in `urls`
                    return spec(environ, start_response, SP)
        if re.match(".*static/.*", path):
            return handle_static(environ, start_response, path)
        return not_found(environ, start_response)
    except StatusError as err:
        logging.error("StatusError: %s" % err)
        resp = BadRequest("%s" % err)
        return resp(environ, start_response)
    except Exception as err:
        #_err = exception_trace("RUN", err)
        #logging.error(exception_trace("RUN", _err))
        print(err, file=sys.stderr)
        resp = ServiceError("%s" % err)
        return resp(environ, start_response)
# ----------------------------------------------------------------------------
# TCP port the SP listens on (taken from the service configuration module)
PORT = service_conf.PORT
# ------- HTTPS -------
# These should point to relevant files
SERVER_CERT = service_conf.SERVER_CERT
SERVER_KEY = service_conf.SERVER_KEY
# This is of course the certificate chain for the CA that signed
# your cert and all the way up to the top
CERT_CHAIN = service_conf.CERT_CHAIN
if __name__ == '__main__':
    from cherrypy import wsgiserver

    # Command-line interface for the example SP
    _parser = argparse.ArgumentParser()
    _parser.add_argument('-d', dest='debug', action='store_true',
                         help="Print debug information")
    _parser.add_argument('-D', dest='discosrv',
                         help="Which disco server to use")
    _parser.add_argument('-s', dest='seed',
                         help="Cookie seed")
    # BUG FIX: '-W' takes the WAYF URL as a value.  It was previously
    # declared with action='store_true', which stored the boolean True and
    # later broke SSO._wayf_redirect() where the value is used as a URL.
    _parser.add_argument('-W', dest='wayf',
                         help="Which WAYF url to use")
    _parser.add_argument("config", help="SAML client config")
    ARGS = {}
    _args = _parser.parse_args()
    if _args.discosrv:
        ARGS["discosrv"] = _args.discosrv
    if _args.wayf:
        ARGS["wayf"] = _args.wayf
    CACHE = Cache()
    CNFBASE = _args.config
    # cookie seed: command-line override or a fixed default
    if _args.seed:
        SEED = _args.seed
    else:
        SEED = "SnabbtInspel"
    SP = Saml2Client(config_file="%s" % CNFBASE)
    POLICY = service_conf.POLICY
    add_urls()
    SRV = wsgiserver.CherryPyWSGIServer(('0.0.0.0', PORT), application)
    # To serve HTTPS, wrap the socket with the configured cert material:
    # SRV.ssl_adapter = ssl_pyopenssl.pyOpenSSLAdapter(SERVER_CERT,
    #                                                  SERVER_KEY, CERT_CHAIN)
    logger.info("Server starting")
    print("SP listening on port: %s" % PORT)
    try:
        SRV.start()
    except KeyboardInterrupt:
        SRV.stop()
| {
"content_hash": "ac1b3605bff6b50ba64ff559b7c9a728",
"timestamp": "",
"source": "github",
"line_count": 738,
"max_line_length": 82,
"avg_line_length": 35.48780487804878,
"alnum_prop": 0.5369606720122184,
"repo_name": "rohe/pysaml2-3",
"id": "ea83982a387c775685d0fcd353d627d959a39360",
"size": "26213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/sp-wsgi/sp.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5367558"
},
{
"name": "Shell",
"bytes": "6973"
}
],
"symlink_target": ""
} |
import math
import struct
import pyboof
import numpy as np
import py4j.java_gateway as jg
from pyboof.common import *
from pyboof.calib import *
class ConfigEssentialMatrix(JavaConfig):
    """Wrapper around BoofCV's ConfigEssential Java configuration object."""
    def __init__(self, java_object=None):
        if java_object is not None:
            JavaWrapper.__init__(self, java_object)
        else:
            JavaConfig.__init__(self, "boofcv.factory.geo.ConfigEssential")
class ConfigRansac(JavaConfig):
    """Wrapper around BoofCV's ConfigRansac Java configuration object."""
    def __init__(self, java_object=None):
        if java_object is not None:
            JavaWrapper.__init__(self, java_object)
        else:
            JavaConfig.__init__(self, "boofcv.factory.geo.ConfigRansac")
class ModelMatcher(JavaWrapper):
    """Robust model-fitting wrapper around a BoofCV ModelMatcher object."""
    def __init__(self, java_object):
        JavaWrapper.__init__(self, java_object)
        # results of the most recent successful process() call
        self.model_parameters = None
        self.match_set = None
        self.input_indexes = []
        self.fit_quality = 0
        self.minimum_size = java_object.getMinimumSize()
    def process(self, data_set):
        """Fit a model to data_set; returns True on success and fills in
        model_parameters, match_set, input_indexes and fit_quality."""
        # TODO use type information (not available yet) to convert the dataset.
        java_pairs = pyboof.p2b_list_AssociatedPair(data_set)
        if not self.java_obj.process(java_pairs):
            return False
        # TODO convert model based on model type info
        self.model_parameters = pyboof.Se3_F64(self.java_obj.getModelParameters())
        self.match_set = pyboof.b2p_list_AssociatedPair(self.java_obj.getMatchSet())
        self.input_indexes = [self.java_obj.getInputIndex(i)
                              for i in range(len(self.match_set))]
        self.fit_quality = self.java_obj.getFitQuality()
        return True
class ModelMatcherMultiview(ModelMatcher):
    """ModelMatcher that additionally needs per-view intrinsic parameters."""
    def __init__(self, java_object):
        ModelMatcher.__init__(self, java_object)
    def set_intrinsic(self, view: int, intrinsic: CameraPinhole):
        """
        Specifies intrinsic parameters for each view
        :param view: Index of the view
        :param intrinsic: Intrinsic camera parameters
        """
        boof_intrinsic = intrinsic.convert_to_boof()
        self.java_obj.setIntrinsic(view, boof_intrinsic)
    def get_number_of_views(self):
        """
        The number of views which need to have camera parameters specified
        """
        return self.java_obj.getNumberOfViews()
class StitchingFromMotion2D(JavaWrapper):
    """Wrapper around BoofCV's StitchingFromMotion2D (video mosaicing)."""
    def __init__(self, java_object, image_type):  # Remove when getImageType() is added
        JavaWrapper.__init__(self, java_object)
        self.image_type = image_type
    def configure(self, mosaic_width: int, mosaic_height: int, scale: float = 1.0):
        """Set the mosaic size; the first image is scaled down and centered."""
        # Build a homography that scales the image and places it in the center
        init = JavaWrapper(
            pbg.gateway.jvm.georegression.struct.homography.Homography2D_F64())
        init.a11 = scale
        init.a22 = scale
        init.a13 = mosaic_width / 2 - (scale * mosaic_width / 2)
        init.a23 = mosaic_height / 2 - (scale * mosaic_height / 2)
        init = JavaWrapper(init.java_obj.invert(None))
        self.java_obj.configure(mosaic_width, mosaic_height, init.java_obj)
    def process(self, image):
        """Add the next image to the mosaic."""
        return self.java_obj.process(image)
    def reset(self):
        self.java_obj.reset()
    def set_origin_to_current(self):
        self.java_obj.setOriginToCurrent()
    def get_stitched_image(self):
        return self.java_obj.getStitchedImage()
    def get_image_type(self):
        return self.image_type
class FactoryMultiViewRobust:
    """Factory for robust multi-view geometry estimators."""
    def __init__(self):
        pass
    @staticmethod
    def baseline_ransac(config_essential, config_ransac):
        """
        Estimates the stereo baseline (SE3) between two images.
        :param config_essential:
        :type config_essential: ConfigEssentialMatrix
        :param config_ransac:
        :type config_ransac: ConfigRansac
        :return:
        :rtype: ModelMatcherMultiview
        """
        java_mm = pbg.gateway.jvm.boofcv.factory.geo.FactoryMultiViewRobust. \
            baselineRansac(config_essential.java_obj, config_ransac.java_obj)
        return ModelMatcherMultiview(java_mm)
class FactoryVideoMosaic:
def __init__(self, dtype):
self.boof_image_class = dtype_to_Class_SingleBand(dtype)
# TODO remove when getImageType() is added
self.image_type = create_ImageType(Family.PLANAR, dtype, 3)
def mosaic(self, config_tracker:pyboof.ConfigPointTracker):
java_object = pbg.gateway.jvm.pyboof.FactoryPyBoofTemp. \
basicVideoMosaic(config_tracker.java_obj, self.boof_image_class)
return StitchingFromMotion2D(java_object, self.image_type) | {
"content_hash": "8c3dcf26ca6d104982f2c04c818cfd04",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 100,
"avg_line_length": 34.5,
"alnum_prop": 0.6590958252217175,
"repo_name": "lessthanoptimal/PyBoof",
"id": "bb146f96f63e048ba48cbc05179db292a8ae8ff8",
"size": "4623",
"binary": false,
"copies": "1",
"ref": "refs/heads/SNAPSHOT",
"path": "pyboof/sfm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "25209"
},
{
"name": "Python",
"bytes": "232072"
}
],
"symlink_target": ""
} |
"""Various PEST(++) control file peripheral operations"""
from __future__ import print_function, division
import os
import warnings
import multiprocessing as mp
import re
import numpy as np
import pandas as pd
pd.options.display.max_colwidth = 100
import pyemu
from ..pyemu_warnings import PyemuWarning
# formatters
# SFMT = lambda x: "{0:>20s}".format(str(x.decode()))
def SFMT(item):
    """Left-justified 20-character string formatter for PEST fields.

    Accepts bytes (decoded first) or anything else (stringified).
    """
    try:
        s = "{0:<20s} ".format(item.decode())
    except (AttributeError, UnicodeDecodeError):
        # not bytes (or not decodable) - fall back to str()
        s = "{0:<20s} ".format(str(item))
    return s
# PEP 8 (E731): named formatters as defs rather than lambda assignments
def SFMT_LONG(x):
    """Left-justified 50-character string formatter (long names/paths)."""
    return "{0:<50s} ".format(str(x))


def IFMT(x):
    """Left-justified 10-character integer formatter."""
    return "{0:<10d} ".format(int(x))


def FFMT(x):
    """Left-justified 20-character scientific-notation float formatter."""
    return "{0:<20.10E} ".format(float(x))
def str_con(item):
    """Converter: lower-cased, stripped string; NaN for empty strings.

    Used as a pandas read_csv converter for name/group columns.
    """
    if len(item) == 0:
        # empty field -> missing value
        # (np.nan: the np.NaN alias was removed in NumPy 2.0)
        return np.nan
    return item.lower().strip()
# Registry of per-section dtypes, field names, formatters, converters and
# default values used when reading/writing PEST control files.
pst_config = {}
# parameter stuff
# tied parameters: parameter name -> parameter it is tied to
pst_config["tied_dtype"] = np.dtype([("parnme", "U20"), ("partied", "U20")])
pst_config["tied_fieldnames"] = ["parnme", "partied"]
pst_config["tied_format"] = {"parnme": SFMT, "partied": SFMT}
pst_config["tied_converters"] = {"parnme": str_con, "partied": str_con}
pst_config["tied_defaults"] = {"parnme": "dum", "partied": "dum"}
# "* parameter data" section columns
pst_config["par_dtype"] = np.dtype(
    [
        ("parnme", "U20"),
        ("partrans", "U20"),
        ("parchglim", "U20"),
        ("parval1", np.float64),
        ("parlbnd", np.float64),
        ("parubnd", np.float64),
        ("pargp", "U20"),
        ("scale", np.float64),
        ("offset", np.float64),
        ("dercom", np.int64),
    ]
)
pst_config["par_fieldnames"] = (
    "PARNME PARTRANS PARCHGLIM PARVAL1 PARLBND PARUBND " + "PARGP SCALE OFFSET DERCOM"
)
pst_config["par_fieldnames"] = pst_config["par_fieldnames"].lower().strip().split()
pst_config["par_format"] = {
    "parnme": SFMT,
    "partrans": SFMT,
    "parchglim": SFMT,
    "parval1": FFMT,
    "parlbnd": FFMT,
    "parubnd": FFMT,
    "pargp": SFMT,
    "scale": FFMT,
    "offset": FFMT,
    "dercom": IFMT,
}
# aliases accepted when users supply friendlier column names
pst_config["par_alias_map"] = {
    "name": "parnme",
    "transform": "partrans",
    "value": "parval1",
    "upper_bound": "parubnd",
    "lower_bound": "parlbnd",
    "group": "pargp",
}
pst_config["par_converters"] = {
    "parnme": str_con,
    "pargp": str_con,
    "parval1": np.float64,
    "parubnd": np.float64,
    "parlbnd": np.float64,
    "scale": np.float64,
    "offset": np.float64,
}
pst_config["par_defaults"] = {
    "parnme": "dum",
    "partrans": "log",
    "parchglim": "factor",
    "parval1": 1.0,
    "parlbnd": 1.1e-10,
    "parubnd": 1.1e10,
    "pargp": "pargp",
    "scale": 1.0,
    "offset": 0.0,
    "dercom": 1,
}
# parameter group stuff
# "* parameter groups" section columns
pst_config["pargp_dtype"] = np.dtype(
    [
        ("pargpnme", "U20"),
        ("inctyp", "U20"),
        ("derinc", np.float64),
        ("derinclb", np.float64),
        ("forcen", "U20"),
        ("derincmul", np.float64),
        ("dermthd", "U20"),
        ("splitthresh", np.float64),
        ("splitreldiff", np.float64),
        ("splitaction", "U20"),
    ]
)
pst_config["pargp_fieldnames"] = (
    "PARGPNME INCTYP DERINC DERINCLB FORCEN DERINCMUL "
    + "DERMTHD SPLITTHRESH SPLITRELDIFF SPLITACTION"
)
pst_config["pargp_fieldnames"] = pst_config["pargp_fieldnames"].lower().strip().split()
# NOTE(review): no formatter is registered for "derinclb" here - confirm
# whether that column is intentionally left to a default format.
pst_config["pargp_format"] = {
    "pargpnme": SFMT,
    "inctyp": SFMT,
    "derinc": FFMT,
    "forcen": SFMT,
    "derincmul": FFMT,
    "dermthd": SFMT,
    "splitthresh": FFMT,
    "splitreldiff": FFMT,
    "splitaction": SFMT,
}
# NOTE(review): converter key "dermethd" does not match the "dermthd"
# fieldname above - confirm whether that converter is ever applied.
pst_config["pargp_converters"] = {
    "pargpnme": str_con,
    "inctyp": str_con,
    "dermethd": str_con,
    "derinc": np.float64,
    "derinclb": np.float64,
    "splitaction": str_con,
    "forcen": str_con,
    "derincmul": np.float64,
}
pst_config["pargp_defaults"] = {
    "pargpnme": "pargp",
    "inctyp": "relative",
    "derinc": 0.01,
    "derinclb": 0.0,
    "forcen": "switch",
    "derincmul": 2.0,
    "dermthd": "parabolic",
    "splitthresh": 1.0e-5,
    "splitreldiff": 0.5,
    "splitaction": "smaller",
}
# observation stuff
# "* observation data" section columns
pst_config["obs_fieldnames"] = "OBSNME OBSVAL WEIGHT OBGNME".lower().split()
pst_config["obs_dtype"] = np.dtype(
    [
        ("obsnme", "U20"),
        ("obsval", np.float64),
        ("weight", np.float64),
        ("obgnme", "U20"),
    ]
)
pst_config["obs_format"] = {
    "obsnme": SFMT,
    "obsval": FFMT,
    "weight": FFMT,
    "obgnme": SFMT,
}
pst_config["obs_converters"] = {
    "obsnme": str_con,
    "obgnme": str_con,
    "weight": np.float64,
    "obsval": np.float64,
}
pst_config["obs_defaults"] = {
    "obsnme": "dum",
    "obsval": 1.0e10,
    "weight": 1.0,
    "obgnme": "obgnme",
}
# aliases accepted when users supply friendlier column names
pst_config["obs_alias_map"] = {"name": "obsnme", "value": "obsval", "group": "obgnme"}
# prior info stuff
# "* prior information" section: empty template plus formatters
pst_config["null_prior"] = pd.DataFrame({"pilbl": None, "obgnme": None}, index=[])
pst_config["prior_format"] = {
    "pilbl": SFMT,
    "equation": SFMT_LONG,
    "weight": FFMT,
    "obgnme": SFMT,
}
pst_config["prior_fieldnames"] = ["pilbl", "equation", "weight", "obgnme"]
# model input/output (template/instruction) file pairs
pst_config["model_io_fieldnames"] = ["pest_file", "model_file"]
pst_config["model_io_format"] = {"pest_file": SFMT_LONG, "model_file": SFMT_LONG}
pst_config["null_model_io"] = pd.DataFrame(
    {"pest_file": None, "model_file": None}, index=[]
)
pst_config["model_io_defaults"] = {"pest_file": "pest_file", "model_file": "model_file"}
# other containers
pst_config["model_command"] = []
# pst_config["template_files"] = []
# pst_config["input_files"] = []
# pst_config["instruction_files"] = []
# pst_config["output_files"] = []
pst_config["other_lines"] = []
pst_config["tied_lines"] = []
pst_config["regul_lines"] = []
pst_config["pestpp_options"] = {}
def read_resfile(resfile):
    """load a PEST-style residual file into a pandas.DataFrame

    Args:
        resfile (`str`): path and name of an existing residual file

    Returns:
        `pandas.DataFrame`: a dataframe of info from the residuals file.
        Column names are the names from the residuals file: "name", "group",
        "measured", "modelled" (with two "L"s), "residual", "weight".

    Raises:
        Exception: if `resfile` does not exist or has no header line

    Example::

        df = pyemu.pst_utils.read_resfile("my.res")
        df.residual.plot(kind="hist")

    """
    # raise (rather than assert) so the check survives python -O
    if not os.path.exists(resfile):
        raise Exception(
            "read_resfile() error: resfile " + "{0} not found".format(resfile)
        )
    converters = {"name": str_con, "group": str_con}
    # context manager so the handle is closed even if no header is found
    with open(resfile, "r") as f:
        # skip any preamble until the header line containing "name"
        while True:
            line = f.readline()
            if line == "":
                raise Exception(
                    "Pst.get_residuals: EOF before finding "
                    + "header in resfile: "
                    + resfile
                )
            if "name" in line.lower():
                header = line.lower().strip().split()
                break
        res_df = pd.read_csv(
            f, header=None, names=header, sep=r"\s+", converters=converters,
            usecols=header  # on_bad_lines='skip'
        )
    # strip the "Cov.", "Mat." and "na" strings that PEST records in the
    # *.res file; make float
    float_cols = [x for x in res_df.columns if x not in ["name", "group"]]
    res_df[float_cols] = (
        res_df[float_cols].replace(["Cov.", "Mat.", "na"], np.nan).astype(float)
    )
    res_df.index = res_df.name
    return res_df
def res_from_en(pst, enfile):
    """load ensemble results from PESTPP-IES into a PEST-style
    residuals `pandas.DataFrame`

    Args:
        pst (`pyemu.Pst`): control file instance supplying the observation data
        enfile (`str` or `pandas.DataFrame`): CSV-format ensemble file name,
            or an already-loaded ensemble dataframe

    Returns:
        `pandas.DataFrame`: a dataframe with the same columns as a
        residual dataframe (a la `pst_utils.read_resfile()`)

    Note:
        If a "base" realization is found in the ensemble, it is used
        as the "modelled" column in the residuals dataframe. Otherwise,
        the mean of the ensemble is used as "modelled"

    Example::

        df = pyemu.pst_utils.res_from_en("my.0.obs.csv")
        df.residual.plot(kind="hist")

    """
    converters = {"name": str_con, "group": str_con}
    obs = pst.observation_data
    if isinstance(enfile, str):
        df = pd.read_csv(enfile, converters=converters)
        df.columns = df.columns.str.lower()
        # realizations become columns, observation names the index.
        # BUG FIX: axis must be passed by keyword - positional axis args to
        # rename_axis() were removed in pandas 2.0.
        df = df.set_index("real_name").T.rename_axis("name").rename_axis(
            None, axis=1
        )
    else:
        df = enfile.T
    if "base" in df.columns:
        modelled = df["base"]
        std = df.std(axis=1)
    else:
        modelled = df.mean(axis=1)
        std = df.std(axis=1)
    # probably a more pandastic way to do this
    res_df = pd.DataFrame({"modelled": modelled, "std": std}, index=obs.obsnme.values)
    res_df["group"] = obs["obgnme"].copy()
    res_df["measured"] = obs["obsval"].copy()
    res_df["weight"] = obs["weight"].copy()
    res_df["residual"] = res_df["measured"] - res_df["modelled"]
    return res_df
def read_parfile(parfile):
    """load a PEST-style parameter value file into a pandas.DataFrame

    Args:
        parfile (`str`): path and name of existing parameter file

    Returns:
        `pandas.DataFrame`: a dataframe with columns of "parnme", "parval1",
        "scale" and "offset", indexed by "parnme"

    Raises:
        Exception: if `parfile` does not exist

    Example::

        df = pyemu.pst_utils.read_parfile("my.par1")

    """
    if not os.path.exists(parfile):
        raise Exception(
            "pst_utils.read_parfile: parfile not found: {0}".format(parfile)
        )
    # BUG FIX: use a context manager - the handle was previously never closed.
    # The first line is the "single point" / precision header - skip it.
    with open(parfile, "r") as f:
        f.readline()
        par_df = pd.read_csv(
            f, header=None, names=["parnme", "parval1", "scale", "offset"], sep=r"\s+"
        )
    par_df.index = par_df.parnme
    return par_df
def write_parfile(df, parfile):
    """write a PEST-style parameter file from a dataframe

    Args:
        df (`pandas.DataFrame`): a dataframe with column names
            that correspond to the entries in the parameter data
            section of the pest control file
        parfile (`str`): name of the parameter file to write

    Example::

        pyemu.pst_utils.write_parfile(pst.parameter_data,"my.par")

    """
    columns = ["parnme", "parval1", "scale", "offset"]
    # fixed-width formatters matching PEST's parameter file layout
    formatters = {
        "parnme": lambda x: "{0:20s}".format(x),
        "parval1": lambda x: "{0:20.7E}".format(x),
        "scale": lambda x: "{0:20.7E}".format(x),
        "offset": lambda x: "{0:20.7E}".format(x),
    }
    for name in columns:
        assert name in df.columns, (
            "write_parfile() error: " + "{0} not found in df".format(name)
        )
    with open(parfile, "w") as f:
        f.write("single point\n")
        body = df.to_string(
            col_space=0,
            columns=columns,
            formatters=formatters,
            justify="right",
            header=False,
            index=False,
            index_names=False,
        )
        f.write(body + "\n")
def parse_tpl_file(tpl_file):
    """parse a PEST-style template file to get the parameter names

    Args:
        tpl_file (`str`): path and name of a template file

    Returns:
        [`str`] : list of (unique) parameter names found in `tpl_file`

    Example::

        par_names = pyemu.pst_utils.parse_tpl_file("my.tpl")

    """
    names = set()
    with open(tpl_file, "r") as f:
        try:
            first = f.readline().strip().split()
            assert first[0].lower() in [
                "ptf",
                "jtf",
            ], "template file error: must start with [ptf,jtf], not:" + str(first[0])
            assert (
                len(first) == 2
            ), "template file error: header line must have two entries: " + str(first)
            marker = first[1]
            assert (
                len(marker) == 1
            ), "template file error: marker must be a single character, not:" + str(
                marker
            )
            # every odd-index chunk after splitting on the marker is a name
            for line in f:
                names.update(line.lower().strip().split(marker)[1::2])
        except Exception as e:
            raise Exception(
                "error processing template file " + tpl_file + " :\n" + str(e)
            )
    return [name.strip() for name in names]
def write_input_files(pst, pst_path="."):
    """write parameter values to model input files

    Args:
        pst (`pyemu.Pst`): a Pst instance
        pst_path (`str`): the path to where the control file and template
            files reside.  Default is '.'.

    Note:
        This function uses template files with the current parameter \
        values (stored in `pst.parameter_data.parval1`).

        This function uses multiprocessing - one process per template file

        This is a simple implementation of what PEST does.  It does not
        handle all the special cases, just a basic function...user beware

    """
    par = pst.parameter_data
    # apply scale/offset before substitution
    par.loc[:, "parval1_trans"] = (par.parval1 * par.scale) + par.offset
    pairs = np.array(list(zip(pst.template_files, pst.input_files)))
    chunk_len = 50
    n_whole = len(pairs) // chunk_len
    # whole chunks of chunk_len (tpl, in) pairs, plus whatever is left over
    chunks = [
        pairs[i * chunk_len: (i + 1) * chunk_len].tolist() for i in range(n_whole)
    ]
    chunks.append(pairs[n_whole * chunk_len:].tolist())
    pool = mp.Pool(processes=min(mp.cpu_count(), len(chunks), 60))
    results = [
        pool.apply_async(
            _write_chunk_to_template,
            args=(chunk, pst.parameter_data.parval1_trans, pst_path),
        )
        for chunk in chunks
    ]
    [r.get() for r in results]
    pool.close()
    pool.join()
def _write_chunk_to_template(chunk, parvals, pst_path):
    """worker: process one chunk of (template, input) file name pairs"""
    for tpl_name, in_name in chunk:
        write_to_template(
            parvals,
            os.path.join(pst_path, tpl_name),
            os.path.join(pst_path, in_name),
        )
def write_to_template(parvals, tpl_file, in_file):
    """write parameter values to a model input file using
    the corresponding template file

    Args:
        parvals (`dict`): a container of parameter names and values. Can
            also be a `pandas.Series`
        tpl_file (`str`): path and name of a template file
        in_file (`str`): path and name of model input file to write

    Raises:
        Exception: if the template header or a marker line is malformed

    Examples::

        pyemu.pst_utils.write_to_template(par.parameter_data.parval1,
                                          "my.tpl","my.input")

    """
    # BUG FIX: use context managers so both handles are closed even when a
    # malformed template raises part way through (they were leaked before).
    with open(tpl_file, "r") as f_tpl, open(in_file, "w") as f_in:
        header = f_tpl.readline().strip().split()
        if header[0].lower() not in ["ptf", "jtf"]:
            raise Exception(
                "template file error: must start with [ptf,jtf], not:" + str(header[0])
            )
        if len(header) != 2:
            raise Exception(
                "template file error: header line must have two entries: " + str(header)
            )
        marker = header[1]
        if len(marker) != 1:
            raise Exception(
                "template file error: marker must be a single character, not:"
                + str(marker)
            )
        for line in f_tpl:
            if marker not in line:
                # no parameters on this line - copy through unchanged
                f_in.write(line)
                continue
            line = line.rstrip()
            par_names = [name.strip() for name in line.lower().split(marker)[1::2]]
            start, end = _get_marker_indices(marker, line)
            if len(par_names) != len(start):
                raise Exception("par_names != start")
            new_line = line[: start[0]]
            # the literal text between consecutive marker pairs
            between = [line[e:s] for s, e in zip(start[1:], end[:-1])]
            for i, name in enumerate(par_names):
                s, e = start[i], end[i]
                w = e - s
                # fewer significant digits when the field is narrow
                d = 6 if w > 15 else 3
                fmt = "{0:" + str(w) + "." + str(d) + "E}"
                new_line += fmt.format(parvals[name])
                if i != len(par_names) - 1:
                    new_line += between[i]
            new_line += line[end[-1] :]
            f_in.write(new_line + "\n")
def _get_marker_indices(marker, line):
"""method to find the start and end parameter markers
on a template file line. Used by write_to_template()
"""
indices = [i for i, ltr in enumerate(line) if ltr == marker]
start = indices[0:-1:2]
end = [i + 1 for i in indices[1::2]]
assert len(start) == len(end)
return start, end
def parse_ins_file(ins_file):
    """parse a PEST-style instruction file to get observation names

    Args:
        ins_file (`str`): path and name of an existing instruction file

    Returns:
        [`str`]: a list of observation names found in `ins_file`

    Raises:
        Exception: if the header line is not "pif"/"jif" followed by a
            single-character marker.

    Note:
        This is a basic function for parsing instruction files to
        look for observation names.

    Example::

        obs_names = pyemu.pst_utils.parse_ins_file("my.ins")

    """
    obs_names = []
    with open(ins_file, "r") as f:
        header = f.readline().strip().split()
        # explicit exceptions (not assert) so the checks survive python -O,
        # consistent with the error handling in write_to_template()
        if header[0].lower() not in ["pif", "jif"]:
            raise Exception(
                "instruction file error: must start with [pif,jif], not:"
                + str(header[0])
            )
        marker = header[1]
        if len(marker) != 1:
            raise Exception(
                "instruction file error: marker must be a single character, not:"
                + str(marker)
            )
        for line in f:
            line = line.lower()
            if marker in line:
                # split on the primary marker: even-index chunks lie outside
                # the marker pairs and may contain [obs], (obs) or !obs! tokens
                raw = line.strip().split(marker)
                for item in raw[::2]:
                    if len(item) > 1:
                        # possible speedup, only attempting to parse if item
                        # is more than 1 char
                        obs_names.extend(_parse_ins_string(item))
            else:
                obs_names.extend(_parse_ins_string(line.strip()))
    return obs_names
def _parse_ins_string(string):
"""split up an instruction file line to get the observation names"""
istart_markers = set(["[", "(", "!"])
marker_dict = {"[": "]", "(": ")", "!": "!"}
# iend_markers = set(["]",")","!"])
setdum = {"dum", "DUM"}
obs_names = []
slen = len(string)
idx = 0
while True:
if idx >= slen - 1:
break
char = string[idx]
if char in istart_markers:
# em = iend_markers[istart_markers.index(char)]
em = marker_dict[char]
# print("\n",idx)
# print(string)
# print(string[idx+1:])
# print(string[idx+1:].index(em))
# print(string[idx+1:].index(em)+idx+1)
eidx = min(slen, string.find(em, idx + 1))
obs_name = string[idx + 1 : eidx]
if obs_name not in setdum:
obs_names.append(obs_name)
idx = eidx + 1
else:
idx += 1
return obs_names
def _populate_dataframe(index, columns, default_dict, dtype):
"""helper function to populate a generic Pst dataframe attribute.
Note:
This function is called as part of constructing a generic Pst instance
"""
new_df = pd.concat(
[pd.Series(default_dict[fieldname],
index=index,
name=fieldname).astype(dt[1])
for fieldname, dt in zip(columns, dtype.descr)],
axis=1
)
return new_df
def generic_pst(par_names=["par1"], obs_names=["obs1"], addreg=False):
    """generate a generic pst instance.

    Args:
        par_names ([`str`], optional): parameter names to include in the new
            `pyemu.Pst`.  Default is ["par1"].
        obs_names ([`str`], optional): observation names to include in the new
            `pyemu.Pst`.  Default is ["obs1"].
        addreg (`bool`): flag to add zero-order Tikhonov prior information
            equations to the new control file

    Returns:
        `pyemu.Pst`: a new control file instance. This instance does not have
        all the info needed to run, but is a placeholder that can then be
        filled in later.

    Example::

        par_names = ["par1","par2"]
        obs_names = ["obs1","obs2"]
        pst = pyemu.pst_utils.generic_pst(par_names,obs_names)

    """
    if not isinstance(par_names, list):
        par_names = list(par_names)
    if not isinstance(obs_names, list):
        obs_names = list(obs_names)
    # start from an empty control file container (no file is read)
    new_pst = pyemu.Pst("pest.pst", load=False)
    # a single default parameter group
    pargp_data = _populate_dataframe(
        ["pargp"], new_pst.pargp_fieldnames, new_pst.pargp_defaults, new_pst.pargp_dtype
    )
    new_pst.parameter_groups = pargp_data
    # parameter data filled with defaults, indexed and sorted by name
    par_data = _populate_dataframe(
        par_names, new_pst.par_fieldnames, new_pst.par_defaults, new_pst.par_dtype
    )
    par_data.loc[:, "parnme"] = par_names
    par_data.index = par_names
    par_data.sort_index(inplace=True)
    new_pst.parameter_data = par_data
    # observation data filled with defaults, indexed and sorted by name
    obs_data = _populate_dataframe(
        obs_names, new_pst.obs_fieldnames, new_pst.obs_defaults, new_pst.obs_dtype
    )
    obs_data.loc[:, "obsnme"] = obs_names
    obs_data.index = obs_names
    obs_data.sort_index(inplace=True)
    new_pst.observation_data = obs_data
    # placeholder model interface entries
    new_pst.model_command = ["model.bat"]
    new_pst.prior_information = new_pst.null_prior
    if addreg:
        new_pst.zero_order_tikhonov()
    return new_pst
def try_read_input_file_with_tpl(tpl_file, input_file=None):
    """attempt to read parameter values from an input file using a template file

    Args:
        tpl_file (`str`): path and name of a template file
        input_file (`str`,optional): path and name of existing model
            input file to process.  If `None`, `tpl_file.replace(".tpl","")`
            is used.  Default is None.

    Returns:
        `pandas.DataFrame`: a dataframe of parameter name and values
        extracted from `input_file`.  `None` is returned if `input_file`
        does not exist or cannot be processed.

    Note:
        If an exception is raised when reading the input file, the exception
        is echoed to the screen and `None` is returned.

    Example::

        df = pyemu.pst_utils.try_read_input_file_with_tpl("my.tpl","my.input")

    """
    if input_file is None:
        input_file = tpl_file.replace(".tpl", "")
    if not os.path.exists(input_file):
        return None
    # parse the template first for basic error checking (this raises on a
    # malformed tpl file); the names themselves are re-derived below, so
    # the previous unused binding of the result has been dropped
    parse_tpl_file(tpl_file)
    try:
        df = _read_infile_with_tplfile(tpl_file, input_file)
    except Exception as e:
        print("error trying to read input file with tpl file:{0}".format(str(e)))
        return None
    return df
def _read_infile_with_tplfile(tpl_file, input_file):
"""attempt to read parameter values from an input file using a template file,
raising heaps of exceptions.
Args:
tpl_file (`str`): path and name of a template file
input_file (`str`): path and name of existing model
Returns:
`pandas.DataFrame`: a dataframe of parameter name and values
extracted from `input_file`.
Note:
use try_read_inputfile_with_tpl instead of this one.
"""
if not os.path.exists(input_file):
raise Exception("input file '{0}' not found".format(input_file))
f_tpl = open(tpl_file, "r")
f_in = open(input_file, "r")
# read the tpl header
_, marker = f_tpl.readline().split()
itpl, iin = 1, 0
pnames, pvals = [], []
pdict = {}
while True:
tpl_line = f_tpl.readline()
if tpl_line == "":
break
in_line = f_in.readline()
if in_line == "":
raise Exception(
"input file EOF, tpl file line {0}, in file line {1}".format(itpl, iin)
)
if marker in tpl_line:
idxs = [i for i, ltr in enumerate(tpl_line) if ltr == marker]
if len(idxs) % 2 != 0:
raise Exception("unbalanced markers on tpl line {0}".format(itpl))
for s, e in zip(idxs[0:-1:2], idxs[1::2]):
tpl_str = tpl_line[s : e + 1]
pname = tpl_str.replace(marker, "").strip().lower()
if s > len(in_line):
raise Exception(
"input file EOL line {0}, tpl line {1}, looking for {2}".format(
iin, itpl, tpl_str
)
)
junk_val = "Jennyigotunumber8675309"
tmp = tpl_line[:s] + " {} ".format(junk_val) + tpl_line[e + 1 :]
if len(tmp.split()) == len(in_line.split()):
# treat this as whitespace delimited
in_str = in_line.split()[tmp.split().index(junk_val)]
else:
# or we must assume the params are written using the same spacing as template file
in_str = in_line[s : e + 1]
try:
v = float(in_str)
except Exception as e:
raise Exception(
"error casting '{0}' to float on in line {1}, tpl line {2} for {3}: {4}".format(
in_str, iin, itpl, tpl_str, str(e)
)
)
if pname in pdict:
eval = pdict[pname]
if not np.isclose(eval, v, 1.0e-6):
raise Exception(
"different values {0}:{1} for par {2} on in line {3}".format(
v, eval, pname, iin
)
)
else:
pnames.append(pname)
pvals.append(v)
pdict[pname] = v
itpl += 1
iin += 1
df = pd.DataFrame({"parnme": pnames, "parval1": pvals}, index=pnames)
return df
def try_process_output_file(ins_file, output_file=None):
    """attempt to process a model output file using a PEST-style instruction file

    Args:
        ins_file (`str`): path and name of an instruction file
        output_file (`str`,optional): path and name of existing model
            output file to process.  If `None`, `ins_file.replace(".ins","")`
            is used.  Default is None.

    Returns:
        `pandas.DataFrame`: a dataframe of observation names and simulated
        outputs extracted from `output_file`, or `None` on failure.

    Note:
        If an exception is raised when processing the output file, the exception
        is echoed to the screen and `None` is returned.

    Example::

        df = pyemu.pst_utils.try_process_output_file("my.ins","my.output")

    """
    if output_file is None:
        output_file = ins_file.replace(".ins", "")
    processor = InstructionFile(ins_file)
    try:
        return processor.read_output_file(output_file)
    except Exception as e:
        print("error processing instruction/output file pair: {0}".format(str(e)))
        return None
def try_process_output_pst(pst):
    """attempt to process each instruction file / model output
    file pair in a `pyemu.Pst`.

    Args:
        pst (`pyemu.Pst`): a control file instance

    Returns:
        None - the `pst.observation_data.obsval` attribute is updated
        in place for every pair that can be processed.

    Note:
        This function first tries to process each output file with the
        InstructionFile class.  If that fails, it falls back to running
        INSCHEK.  When a pair is processed successfully, the extracted
        simulated values are written into `pst.observation_data.obsval`.
    """
    for ins_name, out_name in zip(pst.instruction_files, pst.output_files):
        try:
            reader = InstructionFile(ins_name, pst=pst)
            extracted = reader.read_output_file(out_name)
        except Exception as e:
            warnings.warn(
                "error processing instruction file {0}, trying inschek: {1}".format(
                    ins_name, str(e)
                )
            )
            # best-effort fallback; returns None on failure
            extracted = _try_run_inschek(ins_name, out_name)
        if extracted is not None:
            pst.observation_data.loc[extracted.index, "obsval"] = extracted.obsval
def _try_run_inschek(ins_file, out_file, cwd="."):
    """try to run INSCHEK and load the resulting obf file

    Args:
        ins_file (`str`): instruction file name
        out_file (`str`): model output file name
        cwd (`str`, optional): directory to run INSCHEK in.  Default is "."

    Returns:
        `pandas.DataFrame`: dataframe indexed by (lower-cased) observation
        name with a single "obsval" column, or `None` if INSCHEK failed.
    """
    try:
        pyemu.os_utils.run("inschek {0} {1}".format(ins_file, out_file), cwd=cwd)
        obf_file = os.path.join(cwd, ins_file.replace(".ins", ".obf"))
        # sep=r"\s+" is the non-deprecated equivalent of delim_whitespace=True
        df = pd.read_csv(
            obf_file, sep=r"\s+", skiprows=0, index_col=0, names=["obsval"]
        )
        df.index = df.index.map(str.lower)
        return df
    except Exception as e:
        print(
            "error using inschek for instruction file {0}:{1}".format(ins_file, str(e))
        )
        # original message was missing the separating space ("havegeneric")
        print("observations in this instruction file will have generic values.")
        return None
def get_phi_comps_from_recfile(recfile):
    """read the phi components from a record file by iteration

    Args:
        recfile (`str`): pest record file name

    Returns:
        `dict`: nested dictionary of {iteration number: {group: contribution}}

    Note:
        It is really poor form to use the record file in this way.  Please only
        use this as a last resort!
    """
    iiter = 1
    iters = {}
    # context manager so the record file is always closed (the original
    # handle was never closed)
    with open(recfile, "r") as f:
        while True:
            line = f.readline()
            if line == "":
                break
            if (
                "starting phi for this iteration" in line.lower()
                or "final phi" in line.lower()
            ):
                contributions = {}
                while True:
                    line = f.readline()
                    if line == "":
                        break
                    if "contribution to phi" not in line.lower():
                        # first non-contribution line ends this iteration's block
                        iters[iiter] = contributions
                        iiter += 1
                        break
                    raw = line.strip().split()
                    val = float(raw[-1])
                    # group name is the third-to-last token, quotes stripped
                    group = raw[-3].lower().replace('"', "")
                    contributions[group] = val
    return iters
def res_from_obseravtion_data(observation_data):
    """create a PEST-style residual dataframe filled with np.nan for
    missing information

    Args:
        observation_data (`pandas.DataFrame`): the "* observation data"
            `pandas.DataFrame` from `pyemu.Pst.observation_data`

    Returns:
        `pandas.DataFrame`: a dataframe with the same columns as the
        residual dataframe ("name","group","measured","modelled",
        "residual","weight").

    Note:
        the misspelled function name is retained for backward
        compatibility with existing callers.
    """
    res_df = observation_data.copy()
    # rename the pst-style columns to their residual-file equivalents
    res_df.loc[:, "name"] = res_df.pop("obsnme")
    res_df.loc[:, "measured"] = res_df.pop("obsval")
    res_df.loc[:, "group"] = res_df.pop("obgnme")
    # np.nan (the np.NaN alias was removed in numpy 2.0) marks values
    # that require model results to fill in
    res_df.loc[:, "modelled"] = np.nan
    res_df.loc[:, "residual"] = np.nan
    return res_df
def clean_missing_exponent(pst_filename, clean_filename="clean.pst"):
    """fixes the issue where some terrible fortran program may have
    written a floating point format without the 'e' - like 1.0+3, really?!

    Args:
        pst_filename (`str`): the pest control file
        clean_filename (`str`, optional): the new pest control file to write.
            Default is "clean.pst"

    Note:
        only missing-'e' positive exponents (e.g. "1.0+3" -> "1.0e+3") are
        repaired; a negative exponent written as "1.0-3" is ambiguous with
        subtraction/negative numbers and is left untouched.  All lines are
        lower-cased and stripped in the output file.
    """
    lines = []
    with open(pst_filename, "r") as f:
        for line in f:
            line = line.lower().strip()
            if "+" in line:
                raw = line.split("+")
                for i, r in enumerate(raw[:-1]):
                    # guard against an empty chunk (line starting with '+'),
                    # which previously raised IndexError on r[-1]
                    if len(r) > 0 and r[-1] != "e":
                        raw[i] = r + "e"
                lines.append("+".join(raw))
            else:
                lines.append(line)
    with open(clean_filename, "w") as f:
        for line in lines:
            f.write(line + "\n")
def csv_to_ins_file(
    csv_filename,
    ins_filename=None,
    only_cols=None,
    only_rows=None,
    marker="~",
    includes_header=True,
    includes_index=True,
    prefix="",
    head_lines_len=0,
    sep=",",
    gpname=False,
):
    """write a PEST-style instruction file from an existing CSV file

    Args:
        csv_filename (`str` or `pandas.DataFrame`): path and name of an
            existing CSV file, or an already-loaded dataframe
        ins_filename (`str`, optional): path and name of the instruction
            file to create.  If `None`, then `csv_filename`+".ins" is used.
            Default is `None`.
        only_cols ([`str`]): list of columns to add observations for in the
            resulting instruction file. If `None`, all columns are used.
        only_rows ([`str`]): list of rows to add observations for in the
            resulting instruction file. If `None`, all rows are used.
        marker (`str`): the PEST instruction marker to use. Default is "~"
        includes_header (`bool`): flag to indicate `csv_filename` includes a
            header row as the first row.  Default is True.
        includes_index (`bool`): flag to indicate `csv_filename` includes an
            index column as the first column.  Default is True.
        prefix (`str` or [`str`], optional): a prefix to prepend to
            observation names (one per selected column if a sequence).
            Default is ""
        head_lines_len (`int`, optional): number of extra leading lines in
            the output file to skip before the (optional) header row.
            Default is 0
        sep (`str`, optional): separator used in the model output file;
            "," emits marker-pair spacers, anything else emits "!dum!"
            placeholders.  Default is ","
        gpname (`bool`, `str` or [`str`]): Optional PEST group name for columns

    Returns:
        `pandas.DataFrame`: a dataframe of observation names and values found in
        `csv_filename`

    Note:
        resulting observation names in `ins_filename` are a combination of index and
        header values.

    """
    # process the csv_filename in case it is a dataframe
    if isinstance(csv_filename, str):
        df = pd.read_csv(csv_filename, index_col=0)
        df.columns = df.columns.map(str.lower)
        df.index = df.index.map(lambda x: str(x).lower())
    else:
        df = csv_filename
    # process only_cols: normalize to a set of lower-cased names
    if only_cols is None:
        only_cols = set(df.columns.map(lambda x: x.lower().strip()).tolist())
    else:
        if isinstance(only_cols, str):  # incase it is a single name
            only_cols = [only_cols]
        only_cols = set(only_cols)
        only_cols = {c.lower() if isinstance(c, str) else c for c in only_cols}
    # process only_rows the same way
    if only_rows is None:
        only_rows = set(df.index.map(lambda x: x.lower().strip()).tolist())
    else:
        if isinstance(only_rows, str):  # incase it is a single name
            only_rows = [only_rows]
        only_rows = set(only_rows)
        only_rows = {r.lower() if isinstance(r, str) else r for r in only_rows}
    # process the row labels, handling duplicates by appending "_<count>"
    rlabels = []
    row_visit = {}
    only_rlabels = []
    for rname_org in df.index:
        rname = str(rname_org).strip().lower()
        if rname in row_visit:
            rsuffix = "_" + str(int(row_visit[rname] + 1))
            row_visit[rname] += 1
        else:
            row_visit[rname] = 1
            rsuffix = ""
        rlabel = rname + rsuffix
        rlabels.append(rlabel)
        if rname in only_rows or rname_org in only_rows:
            only_rlabels.append(rlabel)
    only_rlabels = set(only_rlabels)
    # process the col labels, handling duplicates the same way
    clabels = []
    col_visit = {}
    only_clabels = []
    for cname_org in df.columns:
        cname = str(cname_org).strip().lower()
        if cname in col_visit:
            csuffix = "_" + str(int(col_visit[cname] + 1))
            col_visit[cname] += 1
        else:
            col_visit[cname] = 1
            csuffix = ""
        clabel = cname + csuffix
        clabels.append(clabel)
        if cname in only_cols or cname_org in only_cols:
            only_clabels.append(clabel)
    only_clabels = set(only_clabels)
    if len(only_clabels) == 0:
        print("only_cols:", only_cols)
        raise Exception("csv_to_ins_file(): only_clabels is empty")
    if ins_filename is None:
        if not isinstance(csv_filename, str):
            raise Exception("ins_filename is None but csv_filename is not string")
        ins_filename = csv_filename + ".ins"
    row_visit, col_visit = {}, {}
    onames = []
    ovals = []
    ognames = []
    only_clabels_len = len(only_clabels)
    clabels_len = len(clabels)
    prefix_is_str = isinstance(prefix, str)
    vals = df.values.copy()  # wasteful but way faster
    with open(ins_filename, "w") as f:
        f.write(f"pif {marker}\n")
        [f.write("l1\n") for _ in range(head_lines_len)]
        if includes_header:
            f.write("l1\n")  # skip the row (index) label
        for i, rlabel in enumerate(rlabels):  # loop over rows
            f.write("l1")
            if rlabel not in only_rlabels:
                # nothing to observe on this row - just advance one line
                f.write("\n")
                continue
            c_count = 0
            line = ""
            for j, clabel in enumerate(clabels):  # loop over columns
                if j == 0:
                    # if first col and input file has an index need additional spacer
                    if includes_index:
                        if sep == ",":
                            # f.write(f" {marker},{marker}")
                            line += f" {marker},{marker}"
                        else:
                            # f.write(" !dum!")
                            line += " !dum! "
                if c_count < only_clabels_len:
                    if clabel in only_clabels:  # and rlabel in only_rlabels:
                        oname = ""
                        # define obs names
                        if not prefix_is_str:
                            nprefix = prefix[c_count]
                        else:
                            nprefix = prefix
                        if len(nprefix) > 0:
                            nname = f"{nprefix}_usecol:{clabel}"
                        else:
                            nname = f"usecol:{clabel}"
                        oname = f"{nname}_{rlabel}"
                        onames.append(oname)  # append list of obs
                        ovals.append(vals[i, j])  # store current obs val
                        # define group name
                        if gpname is False or gpname[c_count] is False:
                            # keeping consistent behaviour
                            ngpname = None  # nname
                        elif gpname is True or gpname[c_count] is True:
                            ngpname = nname  # set to base of obs name
                        else:  # a group name has been specified
                            if not isinstance(gpname, str):
                                ngpname = gpname[c_count]
                            else:
                                ngpname = gpname
                        ognames.append(ngpname)  # add to list of group names
                        # start defining string to write in ins
                        oname = f" !{oname}!"
                        line += f" {oname} "
                        if j < len(clabels) - 1:
                            if sep == ",":
                                line += f" {marker},{marker} "
                            # else:
                            #     line += " !dum! "
                        c_count += 1
                    elif (
                        j < len(clabels) - 1
                    ):  # this isnt a row-col to observationalize (nice word!)
                        if sep == ",":
                            line += f" {marker},{marker} "
                        else:
                            line += " !dum! "
            f.write(line + "\n")
    odf = pd.DataFrame(
        {"obsnme": onames, "obsval": ovals, "obgnme": ognames}, index=onames
    ).dropna(
        axis=1
    )  # dropna to keep consistent after adding obgnme
    return odf
class InstructionFile(object):
    """class for handling instruction files.

    Args:
        ins_filename (`str`): path and name of an existing instruction file
        pst (`pyemu.Pst`, optional): Pst instance - used for checking that instruction file is
            compatible with the control file (e.g. no duplicates)

    Example::

        i = InstructionFile("my.ins")
        df = i.read_output_file("my.output")

    """

    def __init__(self, ins_filename, pst=None):
        # current line numbers in the instruction and output files
        self._ins_linecount = 0
        self._out_linecount = 0
        self._ins_filename = ins_filename
        # self._pst = pst
        # primary marker char, set from the "pif" header in read_ins_file()
        self._marker = None
        # file handles are opened lazily by the _readline_* helpers
        self._ins_filehandle = None
        self._out_filehandle = None
        self._last_line = ""
        # full set of obs names from the pst (if given) for cross-checking
        self._full_oname_set = None
        if pst is not None:
            self._full_oname_set = set(pst.obs_names)
        # obs names encountered while parsing the instruction file
        self._found_oname_set = set()
        self._instruction_lines = []
        self._instruction_lcount = []
        self.read_ins_file()

    @property
    def obs_name_set(self):
        """`set`: the observation names found in the instruction file"""
        return self._found_oname_set

    def read_ins_file(self):
        """read the instruction and do some minimal error checking.

        Note:
            This is called by the constructor

        """
        self._instruction_lines = []
        self._instruction_lcount = []
        first_line = self._readline_ins()
        if len(first_line) < 2:
            raise Exception(
                "first line of ins file must have atleast two entries, not '{0}'".format(
                    ",".join(first_line)
                )
            )
        if first_line[0] != "pif":
            raise Exception(
                "first line of ins file '{0}' must start with 'pif', not '{1}'".format(
                    self._ins_filename, first_line[0]
                )
            )
        self._marker = first_line[1]
        while True:
            line = self._readline_ins()
            if line is None:
                # EOF
                break
            elif len(line) == 0:
                self.throw_ins_warning("empty line, breaking")
                break
            else:
                # first token must be a line advance, a primary marker,
                # or (unsupported) continuation
                c1 = line[0][:1]
                if c1 == "l":
                    pass
                elif c1 == self._marker:
                    pass
                elif c1 == "&":
                    self.throw_ins_error("line continuation not supported")
                else:
                    self.throw_ins_error(
                        "first token must be line advance ('l'), primary marker, or continuation ('&'),"
                        + "not: {0}".format(line[0])
                    )
                for token in line[1:]:
                    t1 = token[:1]
                    if t1 == "t":
                        self.throw_ins_error("tab instruction not supported")
                    elif t1 == self._marker:
                        # secondary marker token must be closed by the marker too
                        tn = token[-1:]
                        if not tn == self._marker:
                            self.throw_ins_error(
                                "unbalanced secondary marker in token '{0}'".format(token)
                            )
                    # check observation tokens: !name!, [name]s:e, (name)s:e
                    for somarker, eomarker in zip(["!", "[", "("], ["!", "]", ")"]):
                        #
                        if t1 == somarker:
                            ofound = True
                            if eomarker not in token[1:]:
                                self.throw_ins_error(
                                    "unmatched observation marker '{0}', looking for '{1}' in token '{2}'".format(
                                        somarker, eomarker, token
                                    )
                                )
                            raw = token[1:].split(eomarker)[0].replace(somarker, "")
                            if raw == "dum":
                                # "dum" placeholder is not a real observation
                                pass
                            else:
                                if (
                                    self._full_oname_set is not None
                                    and raw not in self._full_oname_set
                                ):
                                    self.throw_ins_error(
                                        "obs name '{0}' not in pst".format(raw)
                                    )
                                elif raw in self._found_oname_set:
                                    self.throw_ins_error(
                                        "obs name '{0}' is listed more than once".format(
                                            raw
                                        )
                                    )
                                self._found_oname_set.add(raw)
                            break
                        # print(raw)
            self._instruction_lines.append(line)
            self._instruction_lcount.append(self._ins_linecount)

    def throw_ins_warning(self, message, lcount=None):
        """throw a verbose PyemuWarning

        Args:
            message (`str`): the warning message
            lcount (`int`, optional): warning line number.  If None, self._ins_linecount is used

        """
        if lcount is None:
            lcount = self._ins_linecount
        warnings.warn(
            "InstructionFile error processing instruction file {0} on line number {1}: {2}".format(
                self._ins_filename, lcount, message
            ),
            PyemuWarning,
        )

    def throw_ins_error(self, message, lcount=None):
        """throw a verbose instruction file error

        Args:
            message (`str`): the error message
            lcount (`int`, optional): error line number.  If None, self._ins_linecount is used

        """
        if lcount is None:
            lcount = self._ins_linecount
        raise Exception(
            "InstructionFile error processing instruction file on line number {0}: {1}".format(
                lcount, message
            )
        )

    def throw_out_error(self, message, lcount=None):
        """throw a verbose output file error

        Args:
            message (`str`): the error message
            lcount (`int`, optional): error line number.  If None, self._out_linecount is used

        """
        if lcount is None:
            lcount = self._out_linecount
        raise Exception(
            "InstructionFile error processing output file on line number {0}: {1}".format(
                lcount, message
            )
        )

    def read_output_file(self, output_file):
        """process a model output file using `InstructionFile.instruction_set`

        Args:
            output_file (`str`): path and name of existing output file

        Returns:
            `pd.DataFrame`: a dataframe with observation names and simulated values
            extracted from `output_file`, sorted by observation name

        """
        self._out_filename = output_file
        val_dict = {}
        # execute each stored instruction line against the output file
        for ins_line, ins_lcount in zip(
            self._instruction_lines, self._instruction_lcount
        ):
            val_dict.update(self._execute_ins_line(ins_line, ins_lcount))
        df = pd.DataFrame.from_dict(val_dict, orient="index", columns=["obsval"])
        return df.sort_index()

    def _execute_ins_line(self, ins_line, ins_lcount):
        """private method to process output file lines with an instruction line

        Args:
            ins_line ([`str`]): tokenized instruction line
            ins_lcount (`int`): instruction file line number (for error messages)

        Returns:
            `dict`: {obs name: float value} pairs extracted for this line
        """
        cursor_pos = 0  # starting cursor position
        val_dict = {}  # storage dict for obsname: obsval pairs in line
        ii = 0  # counter over instruction entries
        all_markers = True
        line_seps = set([",", " ", "\t"])
        n_ins = len(ins_line)  # number of instructions on line
        maxsearch = 500  # maximum number of characters to search when slicing line
        while True:
            if ii >= n_ins:
                break
            ins = ins_line[ii]  # extract instruction
            i1 = ins[:1]  # first char in instruction
            # primary marker
            if ii == 0 and i1 == self._marker:
                # if first and instruction starts with primary marker
                # search for presence of primary marker e.g. ~start~
                mstr = ins.replace(self._marker, "")
                while True:
                    # loop over lines until primary marker is found
                    line = self._readline_output()  # read line from output
                    if line is None:
                        self.throw_out_error(
                            "EOF when trying to find primary marker '{0}' from "
                            "instruction file line {1}".format(mstr, ins_lcount)
                        )
                    if mstr in line:  # when marker is found break and update
                        # cursor position in current line
                        break
                # copy a version of line commas replaced
                # (to support comma sep strings)
                rline = line.replace(",", " ").replace("\t","")
                cursor_pos = line.index(mstr) + len(mstr)
            # line advance
            elif i1 == "l":  # if start of instruction is line advance
                try:
                    nlines = int(ins[1:])  # try and get advance number
                except Exception as e:
                    self.throw_ins_error(
                        "casting line advance to int for "
                        "instruction '{0}'".format(ins),
                        ins_lcount,
                    )
                for i in range(nlines):
                    line = self._readline_output()
                    if line is None:
                        self.throw_out_error(
                            "EOF when trying to read {0} lines for line "
                            "advance instruction '{1}', from instruction "
                            "file line number {2}".format(nlines, ins, ins_lcount)
                        )
                # copy a version of line commas replaced
                # (to support comma sep strings)
                rline = line.replace(",", " ")
            elif ins == "w":  # whitespace advance: skip to the next entry
                raw = rline[cursor_pos : cursor_pos + maxsearch].split(
                    None, 2
                )  # TODO: maybe slow for long strings -- hopefuly maxsearch helps
                if line[cursor_pos] in line_seps:
                    raw.insert(0, "")
                if len(raw) == 1:
                    self.throw_out_error(
                        "no whitespaces found on output line {0} past {1}".format(
                            line, cursor_pos
                        )
                    )
                # step over current value
                cursor_pos = rline.replace("\t"," ").find(" ", cursor_pos)
                # now find position of next entry
                cursor_pos = rline.find(raw[1], cursor_pos)
            elif i1 == "!":  # indicates a free-format obs instruction follows
                oname = ins.replace("!", "")
                # look a head for a second/closing marker
                if ii < n_ins - 1 and ins_line[ii + 1] == self._marker:
                    # if penultimate instruction and last instruction is
                    # primary marker, look for that marker in line
                    m = ins_line[ii + 1].replace(self._marker, "")
                    es = line.find(m, cursor_pos)
                    if es == -1:  # m not in rest of line
                        self.throw_out_error(
                            "secondary marker '{0}' not found from cursor_pos {1}".format(
                                m, cursor_pos
                            )
                        )
                    # read to closing marker
                    val_str = line[cursor_pos:es]
                else:
                    # find next space in (r)line -- signifies end of entry
                    es = rline.find(" ", cursor_pos)
                    if es == -1 or es == cursor_pos:
                        # if no space or current position is space
                        # use old fashioned split to get value
                        # -- this will happen if there are leading blanks before
                        # vals in output file (e.g. formatted)
                        val_str = rline[cursor_pos : cursor_pos + maxsearch].split(
                            None, 1
                        )[0]
                    else:
                        # read val (constrained slice is faster for big strings)
                        val_str = rline[cursor_pos:es]
                try:
                    val = float(val_str)
                except Exception as e:
                    if oname != "dum":
                        self.throw_out_error(
                            "casting string '{0}' to float for instruction '{1}'".format(
                                val_str, ins
                            )
                        )
                if oname != "dum":
                    val_dict[oname] = val
                ipos = line.find(val_str.strip(), cursor_pos)
                cursor_pos = ipos + len(val_str)  # update cursor
                all_markers = False
            elif i1 == self._marker:
                m = ins.replace(self._marker, "")  # extract just primary marker
                # find position of primary marker in line
                es = line.find(m, cursor_pos)
                if es == -1:  # m not in rest of line
                    if all_markers:
                        # markers-only line: retry the whole line on the
                        # next output line
                        ii = 0
                        continue
                    else:
                        self.throw_out_error(
                            "secondary marker '{0}' not found from "
                            "cursor_pos {1}".format(m, cursor_pos)
                        )
                cursor_pos = es + len(m)
            elif i1 == "(":  # semi-fixed observation instruction
                if ")" not in ins:
                    self.throw_ins_error("unmatched ')'", self._instruction_lcount)
                oname = ins[1:].split(")", 1)[0].lower()
                raw = ins.split(")")[1]
                if ":" not in raw:
                    self.throw_ins_error(
                        "couldnt find ':' in semi-fixed instruction: '{0}'".format(ins),
                        lcount=self._instruction_lcount,
                    )
                raw = raw.split(":")
                try:
                    s_idx = int(raw[0]) - 1
                except Exception as e:
                    self.throw_ins_error(
                        "error converting '{0}' to integer in semi-fixed instruction: '{1}'".format(
                            raw[0], ins
                        ),
                        lcount=self._instruction_lcount,
                    )
                try:
                    e_idx = int(raw[1])
                except Exception as e:
                    self.throw_ins_error(
                        "error converting '{0}' to integer in semi-fixed instruction: '{1}'".format(
                            raw[1], ins
                        ),
                        lcount=self._instruction_lcount,
                    )
                if len(line) < e_idx:
                    self.throw_out_error(
                        "output line only {0} chars long, semi-fixed ending col {1}".format(
                            len(line), e_idx
                        )
                    )
                if cursor_pos > e_idx:
                    self.throw_out_error(
                        "cursor at {0} has already read past semi-fixed ending col {1}".format(
                            cursor_pos, e_idx
                        )
                    )
                ss_idx = max(cursor_pos, s_idx)
                raw = line[ss_idx : ss_idx + maxsearch].split(
                    None, 1
                )  # slpitting only 1 might be margin faster
                rs_idx = line.index(raw[0])
                if rs_idx > e_idx:
                    self.throw_out_error(
                        "no non-whitespace chars found in semi-fixed observation {0}".format(
                            ins
                        )
                    )
                re_idx = rs_idx + len(raw[0])
                val_str = line[rs_idx:re_idx]
                try:
                    val = float(val_str)
                except Exception as e:
                    if oname != "dum":
                        self.throw_out_error(
                            "casting string '{0}' to float for instruction '{1}'".format(
                                val_str, ins
                            )
                        )
                if oname != "dum":
                    val_dict[oname] = val
                cursor_pos = re_idx
            elif i1 == "[":  # fixed observation instruction
                if "]" not in ins:
                    self.throw_ins_error("unmatched ']'", self._instruction_lcount)
                oname = ins[1:].split("]", 1)[0].lower()
                raw = ins.split("]")[1]
                if ":" not in raw:
                    self.throw_ins_error(
                        "couldnt find ':' in fixed instruction: '{0}'".format(ins),
                        lcount=self._instruction_lcount,
                    )
                raw = raw.split(":")
                try:
                    s_idx = int(raw[0]) - 1
                except Exception as e:
                    self.throw_ins_error(
                        "error converting '{0}' to integer in fixed instruction: '{1}'".format(
                            raw[0], ins
                        ),
                        lcount=self._instruction_lcount,
                    )
                try:
                    e_idx = int(raw[1])
                except Exception as e:
                    self.throw_ins_error(
                        "error converting '{0}' to integer in fixed instruction: '{1}'".format(
                            raw[1], ins
                        ),
                        lcount=self._instruction_lcount,
                    )
                if len(line) < e_idx:
                    self.throw_out_error(
                        "output line only {0} chars long, fixed ending col {1}".format(
                            len(line), e_idx
                        )
                    )
                if cursor_pos > s_idx:
                    self.throw_out_error(
                        "cursor at {0} has already read past fixed starting col {1}".format(
                            cursor_pos, e_idx
                        )
                    )
                val_str = line[s_idx:e_idx]
                try:
                    val = float(val_str)
                except Exception as e:
                    if oname != "dum":
                        self.throw_out_error(
                            "casting string '{0}' to float for instruction '{1}'".format(
                                val_str, ins
                            )
                        )
                if oname != "dum":
                    val_dict[oname] = val
                cursor_pos = e_idx
            else:
                self.throw_out_error(
                    "unrecognized instruction '{0}' on ins file line {1}".format(
                        ins, ins_lcount
                    )
                )
            ii += 1
        return val_dict

    def _readline_ins(self):
        """consolidate private method to read the next instruction file line.  Casts to lower and splits
        on whitespace, keeping each primary-marker pair (plus its contents)
        as a single token.  Returns `None` at EOF.
        """
        if self._ins_filehandle is None:
            if not os.path.exists(self._ins_filename):
                raise Exception(
                    "instruction file '{0}' not found".format(self._ins_filename)
                )
            self._ins_filehandle = open(self._ins_filename, "r")
        line = self._ins_filehandle.readline()
        self._ins_linecount += 1
        if line == "":
            return None
        self._last_line = line
        # check for spaces in between the markers - this gets ugly
        line = line.lower()
        if self._marker is not None and self._marker in line:
            # poss speedup using regex
            midx = [m.start() for m in re.finditer(re.escape(self._marker), line)]
            midx.append(len(line))
            first = line[: midx[0]].strip()
            tokens = []
            if len(first) > 0:
                tokens.extend([f.strip() for f in first.split()])
            for idx in range(1, len(midx) - 1, 2):
                # marker-delimited string (kept whole) plus whatever follows
                mstr = line[midx[idx - 1] : midx[idx] + 1]
                ostr = line[midx[idx] + 1 : midx[idx + 1]]
                tokens.append(mstr)
                tokens.extend(ostr.split())
        else:
            tokens = line.strip().split()
        return tokens

    def _readline_output(self):
        """consolidate private method to read the next output file line.  Casts to lower.
        Returns `None` at EOF.
        """
        if self._out_filehandle is None:
            if not os.path.exists(self._out_filename):
                raise Exception(
                    "output file '{0}' not found".format(self._out_filename)
                )
            self._out_filehandle = open(self._out_filename, "r")
        line = self._out_filehandle.readline()
        self._out_linecount += 1
        if line == "":
            return None
        self._last_line = line
        return line.lower()
def process_output_files(pst, pst_path="."):
    """helper function to process output files using the
    InstructionFile class

    Args:
        pst (`pyemu.Pst`): control file instance
        pst_path (`str`): path to instruction and output files to append to the front
            of the names in the Pst instance

    Returns:
        `pd.DataFrame`: dataframe of observation names and simulated values
        extracted from the model output files listed in `pst`, or None if
        no output file could be processed

    Example::

        pst = pyemu.Pst("my.pst")
        df = pyemu.pst_utils.process_output_files(pst)

    """
    if not isinstance(pst, pyemu.Pst):
        raise Exception(
            "process_output_files error: 'pst' arg must be pyemu.Pst instance"
        )
    series = []
    for ins, out in zip(pst.instruction_files, pst.output_files):
        ins = os.path.join(pst_path, ins)
        out = os.path.join(pst_path, out)
        if not os.path.exists(out):
            # warn but still attempt the read below; a missing file will then
            # raise and be reported by the except branch as well
            warnings.warn("out file '{0}' not found".format(out), PyemuWarning)
        i = InstructionFile(ins, pst=pst)
        try:
            s = i.read_output_file(out)
            series.append(s)
        except Exception as e:
            # best effort: report and keep processing the remaining file pairs
            warnings.warn("error processing output file '{0}': {1}".format(out, str(e)))
    if len(series) == 0:
        return None
    series = pd.concat(series)
    return series
| {
"content_hash": "071b5ab25b37dacc6de62e8db2e0e73e",
"timestamp": "",
"source": "github",
"line_count": 1817,
"max_line_length": 110,
"avg_line_length": 35.23720418271877,
"alnum_prop": 0.5041077062443382,
"repo_name": "jtwhite79/pyemu",
"id": "7dbd71789c49abafb3cb99b85121079fb71be30f",
"size": "64026",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyemu/pst/pst_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "BASIC",
"bytes": "10830"
},
{
"name": "Batchfile",
"bytes": "1090"
},
{
"name": "Faust",
"bytes": "4256"
},
{
"name": "FreeBasic",
"bytes": "277"
},
{
"name": "Go",
"bytes": "277355"
},
{
"name": "JetBrains MPS",
"bytes": "17759"
},
{
"name": "Jupyter Notebook",
"bytes": "1254666"
},
{
"name": "PLSQL",
"bytes": "2946"
},
{
"name": "Python",
"bytes": "1583947"
},
{
"name": "Reason",
"bytes": "2913705"
},
{
"name": "Smarty",
"bytes": "649093"
},
{
"name": "TeX",
"bytes": "5257815"
}
],
"symlink_target": ""
} |
@profile
def biglist():
    # NOTE(review): ``profile`` is not defined in this file -- presumably it is
    # injected at run time by a profiling tool (kernprof -l / memory_profiler);
    # running this script under a plain interpreter would raise NameError.
    # Builds [0, 1, ..., 999999] one append at a time; the naive loop appears
    # deliberate so the profiler has per-line work to report.
    list_ = []
    for i in range(0, 1000000):
        list_.append(i)
    return list_
biglist() | {
"content_hash": "4f7ddd39e452ac58c8bf4c9230add60e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 28,
"avg_line_length": 15.142857142857142,
"alnum_prop": 0.6415094339622641,
"repo_name": "jphall663/bellarmine_py_intro",
"id": "dcf6b457ac8fae5e205c2ae6499f58f20727dc56",
"size": "106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biglist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "33031"
},
{
"name": "Python",
"bytes": "57303"
}
],
"symlink_target": ""
} |
''' Encapsulate implicit state that is useful for Bokeh plotting APIs.
.. note::
While ``State`` objects can also be manipulated explicitly, they are
automatically configured when the functions :func:`~bokeh.io.output_file`,
etc. from :ref:`bokeh.io` are used, so this is not necessary under
typical usage.
Generating output for Bokeh plots requires coordinating several things:
:class:`~bokeh.document.Document`
Groups together Bokeh models that may be shared between plots (e.g.,
    range or data source objects) into one common structure.
:class:`~bokeh.resources.Resources`
Control how JavaScript and CSS for the client library BokehJS are
included and used in the generated output.
It is possible to handle the configuration of these things manually, and some
examples of doing this can be found in ``examples/models`` directory. When
developing sophisticated applications, it may be necessary or desirable to work
at this level. However, for general use this would quickly become burdensome.
This module provides a ``State`` class that encapsulates these objects and
ensures their proper configuration in many common usage scenarios.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import os
# External imports
from six import string_types
# Bokeh imports
from ..document import Document
from ..resources import Resources
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'curstate',
'State',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class State(object):
    ''' Manage state related to controlling Bokeh output.

    '''

    def __init__(self):
        self.last_comms_handle = None
        self.uuid_to_server = {}  # Mapping from uuid to server instance
        self.reset()

    # Properties --------------------------------------------------------------

    @property
    def document(self):
        ''' A default :class:`~bokeh.document.Document` to use for all
        output operations.

        '''
        return self._document

    @document.setter
    def document(self, doc):
        self._document = doc

    @property
    def file(self):
        ''' A dict with the default configuration for file output (READ ONLY)

        The dictionary value has the following form:

        .. code-block:: python

            {
                'filename'  : # filename to use when saving
                'resources' : # resources configuration
                'title'     : # a title for the HTML document
            }

        '''
        return self._file

    @property
    def notebook(self):
        ''' Whether to generate notebook output on show operations. (READ ONLY)

        '''
        return self._notebook

    @property
    def notebook_type(self):
        ''' Notebook type

        '''
        return self._notebook_type

    @notebook_type.setter
    def notebook_type(self, notebook_type):
        ''' Notebook type, acceptable values are 'jupyter' as well as any names
        defined by external notebook hooks that have been installed.

        '''
        # reject None and non-strings up front so .lower() below cannot fail
        if notebook_type is None or not isinstance(notebook_type, string_types):
            raise ValueError("Notebook type must be a string")
        self._notebook_type = notebook_type.lower()

    # Public methods ----------------------------------------------------------

    def output_file(self, filename, title="Bokeh Plot", mode="cdn", root_dir=None):
        ''' Configure output to a standalone HTML file.

        Calling ``output_file`` does not clear the effects of any other calls to
        ``output_notebook``, etc. It adds an additional output destination
        (publishing to HTML files). Any other active output modes continue
        to be active.

        Args:
            filename (str) : a filename for saving the HTML document

            title (str, optional) : a title for the HTML document

            mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
                One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
                ``'absolute(-dev)'``. See :class:`~bokeh.resources.Resources`
                for more details.

            root_dir (str, optional) : root dir to use for absolute resources
                (default: None)
                This value is ignored for other resource types, e.g. ``INLINE``
                or ``CDN``.

        .. warning::
            The specified output file will be overwritten on every save, e.g.,
            every time ``show()`` or ``save()`` is called.

        '''
        self._file = {
            'filename' : filename,
            'resources' : Resources(mode=mode, root_dir=root_dir),
            'title' : title
        }

        # purely informational: the file is only written on show()/save()
        if os.path.isfile(filename):
            log.info("Session output file '%s' already exists, will be overwritten." % filename)

    def output_notebook(self, notebook_type='jupyter'):
        ''' Generate output in notebook cells.

        Calling ``output_notebook`` does not clear the effects of any other
        calls to ``output_file``, etc. It adds an additional output destination
        (publishing to notebook output cells). Any other active output modes
        continue to be active.

        Returns:
            None

        '''
        self._notebook = True
        self.notebook_type = notebook_type

    def reset(self):
        ''' Deactivate all currently active output modes and set ``curdoc()``
        to a fresh empty ``Document``.

        Subsequent calls to ``show()`` will not render until a new output mode
        is activated.

        Returns:
            None

        '''
        self._reset_with_doc(Document())

    # Private methods ---------------------------------------------------------

    def _reset_keeping_doc(self):
        ''' Reset output modes but DO NOT replace the default Document

        '''
        self._file = None
        self._notebook = False
        self._notebook_type = None

    def _reset_with_doc(self, doc):
        ''' Reset output modes but DO replace the default Document

        '''
        self._document = doc
        self._reset_keeping_doc()
def curstate():
    ''' Return the current State object

    Returns:
        State : the current default State object

    '''
    global _STATE
    if _STATE is not None:
        return _STATE
    # first access: lazily create the process-wide default State
    _STATE = State()
    return _STATE
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_STATE = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| {
"content_hash": "14a4ea9686c686183d7293ddf6f0480f",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 96,
"avg_line_length": 31.991525423728813,
"alnum_prop": 0.5176158940397351,
"repo_name": "dennisobrien/bokeh",
"id": "962762e3fd6ae39fdc81397c7d7c23d16c601a98",
"size": "7897",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/io/state.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102287"
},
{
"name": "CoffeeScript",
"bytes": "413132"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "47532"
},
{
"name": "JavaScript",
"bytes": "25172"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "PowerShell",
"bytes": "691"
},
{
"name": "Python",
"bytes": "3335869"
},
{
"name": "Shell",
"bytes": "9209"
},
{
"name": "TypeScript",
"bytes": "1634873"
}
],
"symlink_target": ""
} |
"""Tests for the MacKeeper Cache database plugin."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import mackeeper_cache as mackeeper_cache_formatter
from plaso.lib import event
from plaso.lib import timelib_test
from plaso.parsers.sqlite_plugins import mackeeper_cache
from plaso.parsers.sqlite_plugins import test_lib
class MacKeeperCachePluginTest(test_lib.SQLitePluginTestCase):
    """Tests for the MacKeeper Cache database plugin."""

    def setUp(self):
        """Sets up the needed objects used throughout the test."""
        preprocess_object = event.PreprocessObject()
        self._plugin = mackeeper_cache.MacKeeperCachePlugin(preprocess_object)

    def testProcess(self):
        """Tests the Process function on a MacKeeper Cache database file."""
        database_path = self._GetTestFilePath(['mackeeper_cache.db'])
        generator = self._ParseDatabaseFileWithPlugin(self._plugin, database_path)
        events = self._GetEventObjects(generator)

        # The cache file contains 198 entries.
        self.assertEquals(len(events), 198)

        event_of_interest = events[41]

        expected_time = timelib_test.CopyStringToTimestamp(
            '2013-07-12 19:30:31')
        self.assertEquals(event_of_interest.timestamp, expected_time)

        expected_message = (
            u'Chat Outgoing Message : I have received your system scan report and '
            u'I will start analyzing it right now. [ URL: http://support.kromtech.'
            u'net/chat/listen/12828340738351e0593f987450z40787/?client-id=51e0593f'
            u'a1a24468673655&callback=jQuery183013571173651143909_1373657420912&_='
            u'1373657423647 Event ID: 16059074 Room: '
            u'12828340738351e0593f987450z40787 ]')
        expected_short_message = (
            u'I have received your system scan report and I will start analyzing '
            u'it right now.')
        self._TestGetMessageStrings(
            event_of_interest, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "3a2704c9bf6ecbec6db20adbd84f3f70",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 80,
"avg_line_length": 37.23076923076923,
"alnum_prop": 0.7283057851239669,
"repo_name": "iwm911/plaso",
"id": "80f2366ce25239398606c2520755de96a2496dbd",
"size": "2634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/parsers/sqlite_plugins/mackeeper_cache_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2431825"
},
{
"name": "Shell",
"bytes": "21885"
},
{
"name": "VHDL",
"bytes": "2100224"
}
],
"symlink_target": ""
} |
from kivy.support import install_twisted_reactor
install_twisted_reactor()
### Simple twisted Client to Controll my Local Tinkerforge
from twisted.internet import reactor, protocol
class EchoClient(protocol.Protocol):
    """Twisted protocol that bridges the TCP connection to the Kivy app."""

    def connectionMade(self):
        """Hand the live transport to the app so it can send messages."""
        self.factory.app.on_connection(self.transport)

    def dataReceived(self, data):
        """Forward every payload received from the server to the app's log."""
        self.factory.app.print_message(data)
class EchoFactory(protocol.ClientFactory):
    """Client factory producing EchoClient protocols bound to the Kivy app."""

    protocol = EchoClient

    def __init__(self, app):
        # the Kivy App instance that receives connection/message callbacks
        self.app = app

    def clientConnectionLost(self, conn, reason):
        """Report a dropped connection in the app's message log."""
        self.app.print_message("connection lost")

    def clientConnectionFailed(self, conn, reason):
        """Report a failed connection attempt in the app's message log."""
        self.app.print_message("connection failed")
############### BEGIN UI ######################
############### Powered by KIVY ######################
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
# A simple kivy App, with a textbox to enter messages, and
# a large label to display all the messages received from
# the server
class TwistedClientApp(App):
    """Kivy app (Python 2 syntax) with a textbox for outgoing messages and a
    label that accumulates everything received from the server."""

    # transport handed over by EchoClient.connectionMade once the TCP link is up
    connection = None

    def build(self):
        """Build the widget tree and kick off the TCP connection."""
        root = self.setup_gui()
        self.connect_to_server()
        return root

    def setup_gui(self):
        # single-line input; pressing Enter fires send_message
        self.textbox = TextInput(size_hint_y=.1, multiline=False)
        self.textbox.bind(on_text_validate=self.send_message)
        self.label = Label(text='connecting...\n')
        self.layout = BoxLayout(orientation='vertical')
        self.layout.add_widget(self.textbox)
        self.layout.add_widget(self.label)
        return self.layout

    def connect_to_server(self):
        # NOTE(review): host/port are hard-coded to a LAN address
        reactor.connectTCP('192.168.0.111', 8000, EchoFactory(self))

    def on_connection(self, connection):
        """Callback from EchoClient: store the transport for sending."""
        self.print_message("connected succesfully!")
        self.connection = connection
        print self.connection

    def send_message(self, *args):
        # only send when there is text and the connection is established
        msg = self.textbox.text
        if msg and self.connection:
            self.connection.write(str(self.textbox.text))
            self.textbox.text = ""

    def print_message(self, msg):
        # append to the running log shown in the label
        self.label.text += msg + "\n"
if __name__ == '__main__':
TwistedClientApp().run() | {
"content_hash": "792deec3df650ee8acd1bdc4168e8ac8",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 68,
"avg_line_length": 30.2,
"alnum_prop": 0.6512141280353201,
"repo_name": "DeathPoison/roomControll",
"id": "301e1ebfff16f671f4fe8bcb7ebba1b917f33149",
"size": "2356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Easy_Kivy_Client/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81722"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
class Type(dict):
    """A ``type`` filter clause, usable directly as a plain dict.

    Instances take the shape ``{"type": {"value": <type_name>}}`` and also
    expose the raw name as ``self.type_name``.
    """

    def __init__(self, type_name):
        """Record *type_name* and populate the dict payload."""
        super(Type, self).__init__()
        self.type_name = type_name
        self["type"] = self._build_dict()

    def _build_dict(self):
        """Return the inner ``{"value": ...}`` mapping for the clause."""
        return {"value": self.type_name}
| {
"content_hash": "ad86a6659d310de9e60da4bed975a7d2",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 56,
"avg_line_length": 22.714285714285715,
"alnum_prop": 0.5471698113207547,
"repo_name": "Yipit/pyeqs",
"id": "c9f5e863ce1f9475e339f7df41cebcdce563c929",
"size": "342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyeqs/dsl/type.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "144689"
}
],
"symlink_target": ""
} |
import numpy as np
import regreg.api as rr
from regreg.problems.simple import gengrad
import nose.tools as nt
from test_seminorms import ac
from copy import copy
def test_simple():
    """Check that several equivalent solver formulations of the same
    l1/l2-penalized quadratic agree with the direct proximal map.

    ``ac`` (imported from test_seminorms) asserts approximate closeness of
    its first two arguments.
    """
    Z = np.random.standard_normal((10,10)) * 4
    p = rr.l1_l2((10,10), lagrange=0.13)
    dual = p.conjugate  # NOTE(review): unused below
    L = 0.23
    loss = rr.quadratic.shift(-Z, coef=L)
    # formulation 1: smooth loss + penalty, solved with FISTA
    problem = rr.simple_problem(loss, p)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-10, debug=True)
    simple_coef = solver.composite.coefs
    # reference: the proximal map of the penalty at Z
    q = rr.identity_quadratic(L, Z, 0, 0)
    prox_coef = p.proximal(q)
    # formulation 2: nonsmooth-only problem with the quadratic attached
    p2 = copy(p)
    p2.quadratic = rr.identity_quadratic(L, Z, 0, 0)
    problem = rr.simple_problem.nonsmooth(p2)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-14, debug=True)
    simple_nonsmooth_coef = solver.composite.coefs
    # formulation 3: generalized gradient iteration on the nonsmooth problem
    p = rr.l1_l2((10,10), lagrange=0.13)
    p.quadratic = rr.identity_quadratic(L, Z, 0, 0)
    problem = rr.simple_problem.nonsmooth(p)
    simple_nonsmooth_gengrad = gengrad(problem, L, tol=1.0e-10)
    # formulation 4: separable-problem wrapper around the same penalty/loss
    p = rr.l1_l2((10,10), lagrange=0.13)
    problem = rr.separable_problem.singleton(p, loss)
    solver = rr.FISTA(problem)
    solver.fit(tol=1.0e-10)
    separable_coef = solver.composite.coefs
    # all formulations must agree with the proximal-map reference
    ac(prox_coef, Z-simple_coef, 'prox to simple')
    ac(prox_coef, simple_nonsmooth_gengrad, 'prox to nonsmooth gengrad')
    ac(prox_coef, separable_coef, 'prox to separable')
    ac(prox_coef, simple_nonsmooth_coef, 'prox to simple_nonsmooth')
| {
"content_hash": "906872de33713dbbbf33ce1ff69f105a",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 80,
"avg_line_length": 31.517857142857142,
"alnum_prop": 0.6713881019830028,
"repo_name": "klingebj/regreg",
"id": "7b2341b8986263e5a850722155e5052f4174bd9c",
"size": "1765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/tests/test_simple_block.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "445648"
}
],
"symlink_target": ""
} |
from ..osid import registry as osid_registry
# Record-type registry for the commenting service: maps each record-type key
# to the metadata (authority/namespace/identifier, display strings, and the
# implementing record/form class names) needed to construct it.
COMMENT_RECORD_TYPES = {
    'file-comment': {
        'authority': 'ODL.MIT.EDU',
        'namespace': 'comment-type',
        'identifier': 'file-comment',
        'display_name': 'File Comment',
        'display_label': 'File Comment',
        'description': 'Comment via file',
        'domain': 'commenting.Comment',
        'module_path': 'dlkit.records.commenting.basic.file_comment_records',
        'object_record_class_name': 'FileCommentRecord',
        'form_record_class_name': 'FileCommentFormRecord'
    }
}

# Fold in the generic OSID object record types from the parent osid registry
# (dict-style lookup with a default so a missing name degrades to a no-op).
COMMENT_RECORD_TYPES.update(osid_registry.__dict__.get('OSID_OBJECT_RECORD_TYPES', {}))
| {
"content_hash": "6c52dae79ad9953b830c16bf4712eb74",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 87,
"avg_line_length": 36.55555555555556,
"alnum_prop": 0.6291793313069909,
"repo_name": "mitsei/dlkit",
"id": "688735e7d475826a675e669696beccaf10d0dd64",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dlkit/records/commenting/registry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25170465"
},
{
"name": "TeX",
"bytes": "1088"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import tflearn
import speech_data
import tensorflow as tf
learning_rate = 0.0001
training_iters = 300000  # steps
batch_size = 64

width = 20  # mfcc features
height = 80  # (max) length of utterance
classes = 10  # digits

# train and test deliberately share one batch, so the model overfits it
batch = word_batch = speech_data.mfcc_batch_generator(batch_size)
X, Y = next(batch)
trainX, trainY = X, Y
testX, testY = X, Y #overfit for now

# Network building: LSTM over MFCC frames -> softmax over the 10 digits
net = tflearn.input_data([None, width, height])
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, classes, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy')
# Training

### add this "fix" for tensorflow version errors
col = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
for x in col:
    tf.add_to_collection(tf.GraphKeys.VARIABLES, x )

model = tflearn.DNN(net, tensorboard_verbose=0)
while 1: #training_iters
    model.fit(trainX, trainY, n_epoch=10, validation_set=(testX, testY), show_metric=True,
          batch_size=batch_size)
    _y = model.predict(X)
    model.save("tflearn.lstm.model")
    print (_y)
    # bug fix: the original printed ``y``, which is never defined (NameError);
    # the intent is to show the ground-truth labels next to the predictions
    print (Y)
| {
"content_hash": "97b18f4b2ffed42103b70ff5d6ea3b95",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 109,
"avg_line_length": 29.525,
"alnum_prop": 0.7273497036409822,
"repo_name": "shanaka-desoysa/tensorflow",
"id": "4694c61e6b563e60e7304fdbe96dc311c45a0151",
"size": "1317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/speech_recognition/speech_demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "523210"
},
{
"name": "Python",
"bytes": "154001"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import unittest
from imbox.parser import *
raw_email = b"""Delivered-To: johndoe@gmail.com
X-Originating-Email: [martin@amon.cx]
Message-ID: <test0@example.com>
Return-Path: martin@amon.cx
Date: Tue, 30 Jul 2013 15:56:29 +0300
From: Martin Rusev <martin@amon.cx>
MIME-Version: 1.0
To: John Doe <johndoe@gmail.com>
Subject: Test email - no attachment
Content-Type: multipart/alternative;
boundary="------------080505090108000500080106"
X-OriginalArrivalTime: 30 Jul 2013 12:56:43.0604 (UTC) FILETIME=[3DD52140:01CE8D24]
--------------080505090108000500080106
Content-Type: text/plain; charset="ISO-8859-1"; format=flowed
Content-Transfer-Encoding: 7bit
Hi, this is a test email with no attachments
--------------080505090108000500080106
Content-Type: text/html; charset="ISO-8859-1"
Content-Transfer-Encoding: 7bit
<html><head>
<meta http-equiv="content-type" content="text/html; charset=ISO-8859-1"></head><body
bgcolor="#FFFFFF" text="#000000">
Hi, this is a test email with no <span style="font-weight: bold;">attachments</span><br>
</body>
</html>
--------------080505090108000500080106--
"""
class TestParser(unittest.TestCase):
    """Unit tests for imbox's email parsing helpers."""

    def test_parse_email(self):
        """The fixture message parses with the expected header fields."""
        message = parse_email(raw_email)
        self.assertEqual(raw_email, message.raw_email)
        self.assertEqual('Test email - no attachment', message.subject)
        self.assertEqual('Tue, 30 Jul 2013 15:56:29 +0300', message.date)
        self.assertEqual('<test0@example.com>', message.message_id)

    def test_parse_email_ignores_header_casing(self):
        """Message-ID is found regardless of the header's letter case."""
        for header in (b'Message-ID: one', b'Message-Id: one',
                       b'Message-id: one', b'message-id: one'):
            self.assertEqual('one', parse_email(header).message_id)

    def test_decode_header(self):
        """RFC 2047 encoded-words decode to the expected unicode text."""
        self.assertEqual(
            decode_mail_header('=?koi8-r?B?UmU6IO7B0M/Nyc7BzsnFIM8g2sHO0dTJyQ==?='),
            'Re: Напоминание о занятии')
        self.assertEqual(
            decode_mail_header('=?koi8-r?B?79vJwsvBIDQwND8/Pw==?='),
            'Ошибка 404???')
        self.assertEqual(
            decode_mail_header('=?koi8-r?B?98/Q0s/TINDPINDSyczP1sXOycAgIvDPzNjTy8nKINHa2csg2sEg?='
                               '=?koi8-r?B?NyDV0s/Lz9ci?='),
            'Вопрос по приложению "Польский язык за 7 уроков"')

    def test_get_mail_addresses(self):
        """'To' and 'From' headers yield name/email dicts."""
        to_message = email.message_from_string("To: John Doe <johndoe@gmail.com>")
        self.assertEqual(
            [{'email': 'johndoe@gmail.com', 'name': 'John Doe'}],
            get_mail_addresses(to_message, 'to'))

        from_message = email.message_from_string("From: John Smith <johnsmith@gmail.com>")
        self.assertEqual(
            [{'email': 'johnsmith@gmail.com', 'name': 'John Smith'}],
            get_mail_addresses(from_message, 'from'))
| {
"content_hash": "be2a4e6cbc38d5167e2790c8b8c75c1d",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 111,
"avg_line_length": 42.82857142857143,
"alnum_prop": 0.6644429619746498,
"repo_name": "ookami-kb/imbox",
"id": "06b7b0d93266392f35f601f86cb3cdda92c079a2",
"size": "3061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parser_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15581"
}
],
"symlink_target": ""
} |
def extractWorldofshandorCom(item):
    '''
    Parser for 'worldofshandor.com'
    '''
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)

    # skip items with neither a volume nor a chapter, and preview posts
    if not (chp or vol) or "preview" in title.lower():
        return None

    # first matching tag wins; tuples are (tag, release name, release type)
    known_tags = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, release_name, release_type in known_tags:
        if tag not in item['tags']:
            continue
        return buildReleaseMessageWithType(
            item, release_name, vol, chp,
            frag=frag, postfix=postfix, tl_type=release_type)

    return False
| {
"content_hash": "8405c2efb284e9184b1279e1318ee5c4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.095238095238095,
"alnum_prop": 0.6295620437956204,
"repo_name": "fake-name/ReadableWebProxy",
"id": "3cb6cabbc42050fd7478ffec4749a8104d9b28bd",
"size": "549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractWorldofshandorCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
class Node:
    """Plain binary-tree node: a key plus left/right child links."""

    def __init__(self, key):
        """Create a leaf node holding *key*; both children start as None."""
        self.data = key
        self.left = None
        self.right = None
| {
"content_hash": "23e73392262473c67848a882699e10f0",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 28,
"avg_line_length": 14.875,
"alnum_prop": 0.4957983193277311,
"repo_name": "sonymoon/algorithm",
"id": "6c9259d445e1c373ca13e514aaba9904f334bee7",
"size": "119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/geeksforgeeks/tree/Node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "264506"
},
{
"name": "Python",
"bytes": "118484"
},
{
"name": "Shell",
"bytes": "235"
}
],
"symlink_target": ""
} |
"""
Given an integer n, return true if it is a power of three. Otherwise, return false.
An integer n is a power of three, if there exists an integer x such that n == 3^x.
Example 1:
Input: n = 27
Output: true
Example 2:
Input: n = 0
Output: false
Example 3:
Input: n = 9
Output: true
Example 4:
Input: n = 45
Output: false
Constraints:
-2^31 <= n <= 2^31 - 1
Follow up: Could you do it without using any loop / recursion?
"""
class Solution:
    def isPowerOfThree(self, n: int) -> bool:
        """Return True iff n == 3**x for some integer x >= 0.

        Repeatedly divides by 3; any non-zero remainder rules n out, and
        n <= 0 falls through to the final membership test and returns False.
        """
        while n > 3:
            quotient, remainder = divmod(n, 3)
            if remainder != 0:
                return False
            n = quotient
        return n in (1, 3)
"content_hash": "9b7b144ddc61c14f2113e9ae4f146bbc",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 83,
"avg_line_length": 18.085714285714285,
"alnum_prop": 0.6003159557661928,
"repo_name": "franklingu/leetcode-solutions",
"id": "e66e1b99268564314ee94133e2ca0ccc8e173f85",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questions/power-of-three/Solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8919"
},
{
"name": "Java",
"bytes": "173033"
},
{
"name": "Python",
"bytes": "996874"
},
{
"name": "Shell",
"bytes": "2559"
}
],
"symlink_target": ""
} |
import mock
from praw import const
import prawdditions.patch
from .. import UnitTest
class TestRedditPrawdditions(UnitTest):
    """Unit tests for the patched ``Reddit.message`` helper."""

    @mock.patch("praw.Reddit.post")
    def test_reddit_message(self, mock_post):
        """message() posts the plain payload to the compose endpoint."""
        prawdditions.patch.patch()
        data = {
            "to": "dummy_user",
            "subject": "dummy_subject",
            "text": "dummy_body",
        }
        self.reddit.message(data["to"], data["subject"], data["text"])
        mock_post.assert_called_with(const.API_PATH["compose"], data=data)

    @mock.patch("praw.Reddit.post")
    def test_reddit_message_with_sr_and_subreddit(self, mock_post):
        """Subreddit recipients are serialized with an '/r/' prefix."""
        prawdditions.patch.patch()
        data = {
            "to": self.reddit.subreddit("testing"),
            "subject": "dummy_subject",
            "text": "dummy_body",
            "from_sr": self.reddit.subreddit("test"),
        }
        self.reddit.message(
            data["to"], data["subject"], data["text"], from_sr=data["from_sr"]
        )
        mock_post.assert_called_with(
            const.API_PATH["compose"],
            data={
                # the recipient subreddit is rendered as "/r/<name>"; every
                # other value is passed through str()
                k: str(v) if k != "to" else "/r/" + str(v)
                for k, v in data.items()
            },
        )
| {
"content_hash": "72eed82918e00babc4a482f195cb766c",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 78,
"avg_line_length": 31.842105263157894,
"alnum_prop": 0.5371900826446281,
"repo_name": "praw-dev/prawdditions",
"id": "a7b90fcb2c9154771dc842762b8cf37d19a4d8aa",
"size": "1210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/patch/test_prawddition_reddit.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9491"
}
],
"symlink_target": ""
} |
import os
from django.db.models import signals
from django.conf import settings
from snapboard import models as snapboard_app
def test_setup(**kwargs):
    """Interactively populate a DEBUG install with random users, categories,
    threads and posts (Python 2; uses raw_input and print statements).

    No-op when DEBUG is off or the board already has threads.
    """
    from random import choice
    from django.contrib.auth.models import User
    from snapboard.models import Thread, Post, Category
    from snapboard import sampledata
    if not settings.DEBUG:
        return
    if Thread.objects.all().count() > 0:
        # return, since there seem to already be threads in the database.
        return
    # ask for permission to create the test
    msg = """
    You've installed SNAPboard with DEBUG=True, do you want to populate
    the board with random users/threads/posts to test-drive the application?
    (yes/no):
    """
    populate = raw_input(msg).strip()
    while not (populate == "yes" or populate == "no"):
        populate = raw_input("\nPlease type 'yes' or 'no': ").strip()
    if populate == "no":
        return
    # create a handful of well-known users (get_or_create keeps reruns safe)
    users = ('john', 'sally', 'susan', 'amanda', 'bob', 'tully', 'fran')
    for u in users:
        user = User.objects.get_or_create(username=u)
        # user.is_staff = True
    cats = ('Random Topics',
        'Good Deals',
        'Skiing in the Vermont Area',
        'The Best Restaurants')
    for c in cats:
        cat = Category.objects.get_or_create(label=c)
    # create 35 threads, each with a random number of posts (up to 49)
    tc = range(1, 50)
    for i in range(0, 35):
        print 'thread ', i, 'created'
        cat= choice(Category.objects.all())
        subj = choice(sampledata.objects.split('\n'))
        thread = Thread(subject=subj, category=cat)
        thread.save()
        for j in range(0, choice(tc)):
            # post body: 2-4 random sample paragraphs
            text = '\n\n'.join([sampledata.sample_data() for x in range(0, choice(range(2, 5)))])
            # create a post with a random author and a random fake IP
            post = Post(
                user=choice(User.objects.all()),
                thread=thread,
                text=text,
                ip='.'.join([str(choice(range(1,255))) for x in (1,2,3,4)]),
            )
            # allows setting of arbitrary ip
            post.management_save()
#signals.post_syncdb.connect(test_setup, sender=snapboard_app)
# vim: ai ts=4 sts=4 et sw=4
| {
"content_hash": "1f667a587ed92cfb766c3b22e8ed3439",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 97,
"avg_line_length": 32.21739130434783,
"alnum_prop": 0.5852451641925326,
"repo_name": "cuker/snapboard",
"id": "5b055dfcf67aa24f73b4129df292d7ec0a712012",
"size": "2223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snapboard/management/sampledata.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "14517"
},
{
"name": "Python",
"bytes": "193548"
}
],
"symlink_target": ""
} |
from tempfile import NamedTemporaryFile
from urllib.parse import urlparse
from airflow.contrib.hooks.ssh_hook import SSHHook
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.utils.decorators import apply_defaults
class S3ToSFTPOperator(BaseOperator):
    """
    Transfer a single file from an S3 bucket to an SFTP server.

    The object is first staged into a local temporary file and then
    uploaded to the remote SFTP path.

    :param s3_bucket: The targeted s3 bucket. This is the S3 bucket from
        where the file is downloaded.
    :type s3_bucket: str
    :param s3_key: The targeted s3 key. This is the specified file path for
        downloading the file from S3. May also be given as a full S3 URL;
        it is normalized by :meth:`get_s3_key`.
    :type s3_key: str
    :param sftp_path: The sftp remote path. This is the specified file path
        for uploading the file to the SFTP server.
    :type sftp_path: str
    :param sftp_conn_id: The sftp connection id used for establishing a
        connection to the SFTP server.
    :type sftp_conn_id: str
    :param s3_conn_id: The s3 connection id used for establishing a
        connection to S3.
    :type s3_conn_id: str
    """
    template_fields = ('s3_key', 'sftp_path')

    @apply_defaults
    def __init__(self,
                 s3_bucket,
                 s3_key,
                 sftp_path,
                 sftp_conn_id='ssh_default',
                 s3_conn_id='aws_default',
                 *args,
                 **kwargs):
        super().__init__(*args, **kwargs)
        self.s3_bucket = s3_bucket
        self.s3_key = s3_key
        self.s3_conn_id = s3_conn_id
        self.sftp_conn_id = sftp_conn_id
        self.sftp_path = sftp_path

    @staticmethod
    def get_s3_key(s3_key):
        """Return the bare object key for *s3_key*, regardless of whether
        it was passed as a plain key or as a full S3 URL."""
        return urlparse(s3_key).path.lstrip('/')

    def execute(self, context):
        # Normalize the (possibly templated) key before use.
        self.s3_key = self.get_s3_key(self.s3_key)
        ssh_hook = SSHHook(ssh_conn_id=self.sftp_conn_id)
        s3_client = S3Hook(self.s3_conn_id).get_conn()
        sftp_client = ssh_hook.get_conn().open_sftp()
        with NamedTemporaryFile("w") as local_tmp:
            # boto3 writes to the temp file by path; the handle only keeps
            # the file alive for the duration of the transfer.
            s3_client.download_file(self.s3_bucket, self.s3_key,
                                    local_tmp.name)
            sftp_client.put(local_tmp.name, self.sftp_path)
| {
"content_hash": "fc04b2d6904542e31141eb80810b894d",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 35.04477611940298,
"alnum_prop": 0.6252129471890971,
"repo_name": "Fokko/incubator-airflow",
"id": "7b5c0a5a93a34b388ebf3de0485269aef5a5e13c",
"size": "3160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/operators/s3_to_sftp_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "14170"
},
{
"name": "HTML",
"bytes": "145596"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "8787104"
},
{
"name": "Shell",
"bytes": "187296"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
# JSON Schema (draft-07) for the body of a complaint-count request: an
# object carrying an optional, nullable ISO-format start/end date pair.
_nullable_date = {"type": ["string", "null"], "format": "date"}

complaint_count_request = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "description": "complaint count request schema",
    "type": "object",
    "title": "Complaint count request",
    "properties": {
        # Copies so the two property schemas stay independent objects.
        "start_date": dict(_nullable_date),
        "end_date": dict(_nullable_date),
    },
}
| {
"content_hash": "5026a010a840cdbca2ec183e909bbecd",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 69,
"avg_line_length": 36.8,
"alnum_prop": 0.5570652173913043,
"repo_name": "alphagov/notifications-api",
"id": "882789513718e1cec025249549228754b8d395d8",
"size": "368",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "app/complaint/complaint_schema.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "719"
},
{
"name": "Jinja",
"bytes": "5543"
},
{
"name": "Makefile",
"bytes": "6627"
},
{
"name": "Mako",
"bytes": "361"
},
{
"name": "Procfile",
"bytes": "35"
},
{
"name": "Python",
"bytes": "3506225"
},
{
"name": "Shell",
"bytes": "13179"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import

# Package entry point: re-exports the model serializers, the API classes
# and the ApiClient so callers can import everything from the top level.

# import models into sdk package
from .models.account_api_serializer import AccountApiSerializer
from .models.public_api_data_create_response import PublicApiDataCreateResponse
from .models.depot_detail_serializer import DepotDetailSerializer
from .models.write_account_api_serializer import WriteAccountApiSerializer
from .models.public_api_data_type_retrieve_response import PublicApiDataTypeRetrieveResponse
from .models.account_integration_type_serializer import AccountIntegrationTypeSerializer
from .models.write_account_integration_serializer import WriteAccountIntegrationSerializer
from .models.write_account_integration_type_serializer import WriteAccountIntegrationTypeSerializer
from .models.write_serializer import WriteSerializer
from .models.account_integration_serializer import AccountIntegrationSerializer
from .models.serializer import Serializer
from .models.write_depot_detail_serializer import WriteDepotDetailSerializer

# import apis into sdk package
from .apis.accountintegration_api import AccountintegrationApi
from .apis.accountintegrationtype_api import AccountintegrationtypeApi
from .apis.data_api import DataApi
from .apis.account_api import AccountApi

# import ApiClient
from .api_client import ApiClient

# Explicit public API of the package (mirrors the imports above).
__all__ = [
    'ApiClient',
    'AccountApiSerializer',
    'PublicApiDataCreateResponse',
    'DepotDetailSerializer',
    'WriteAccountApiSerializer',
    'PublicApiDataTypeRetrieveResponse',
    'AccountIntegrationTypeSerializer',
    'WriteAccountIntegrationSerializer',
    'WriteAccountIntegrationTypeSerializer',
    'WriteSerializer',
    'AccountIntegrationSerializer',
    'Serializer',
    'WriteDepotDetailSerializer',
    'AccountintegrationApi',
    'AccountintegrationtypeApi',
    'DataApi',
    'AccountApi',
]
| {
"content_hash": "1a04ff2ee075fc0e9b0eb91140b6e292",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 99,
"avg_line_length": 41.11363636363637,
"alnum_prop": 0.8253178551686015,
"repo_name": "ambitioninc/ambition-python",
"id": "9c90cf39e61c7e84c1b665235814a92d18b64ca7",
"size": "1809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ambition/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126801"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Restructures the 'shelf' book models: introduces BookCategory,
    # BookEdition and BookItem, moves isbn/publisher data off Book onto
    # BookEdition, and converts Book's single author into many-to-many
    # 'authors' and 'categories' relations.

    dependencies = [
        ('shelf', '0001_initial'),
    ]

    operations = [
        # New lookup table for book categories.
        migrations.CreateModel(
            name='BookCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        # A specific published edition of a book (ISBN + publication date).
        migrations.CreateModel(
            name='BookEdition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('isbn', models.CharField(max_length=17)),
                ('date', models.DateField()),
            ],
        ),
        # A physical copy of an edition (soft/hard cover, catalog number).
        migrations.CreateModel(
            name='BookItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('catalog_number', models.CharField(max_length=30)),
                ('cover_type', models.CharField(choices=[('soft', 'Soft'), ('hard', 'Hard')], max_length=4)),
                ('edition', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.BookEdition')),
            ],
        ),
        # Drop the single-valued fields replaced by the relations below.
        # NOTE(review): data in these columns is discarded, not migrated.
        migrations.RemoveField(
            model_name='book',
            name='author',
        ),
        migrations.RemoveField(
            model_name='book',
            name='isbn',
        ),
        migrations.RemoveField(
            model_name='book',
            name='publisher',
        ),
        # A book can now have several authors...
        migrations.AddField(
            model_name='book',
            name='authors',
            field=models.ManyToManyField(to='shelf.Author'),
        ),
        # ...and each edition points back at its book and publisher.
        migrations.AddField(
            model_name='bookedition',
            name='book',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.Book'),
        ),
        migrations.AddField(
            model_name='bookedition',
            name='publisher',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='shelf.Publisher'),
        ),
        migrations.AddField(
            model_name='book',
            name='categories',
            field=models.ManyToManyField(to='shelf.BookCategory'),
        ),
    ]
| {
"content_hash": "a64101a2cf5ef9dcda35cbfaf491b386",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 116,
"avg_line_length": 34.68571428571428,
"alnum_prop": 0.5370675453047776,
"repo_name": "KredekPth/Kurs_django",
"id": "9dbe3d9b9ac26ac01fc311a04a0d6079bd565172",
"size": "2499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shelf/migrations/0002_auto_20170427_2121.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1949"
},
{
"name": "Python",
"bytes": "16181"
}
],
"symlink_target": ""
} |
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ToughSchedulingCasesPage(page_module.Page):

    """Base page for the tough-scheduling cases; the default measurement
    is a single smooth scroll of the whole page."""

    def __init__(self, url, page_set):
        super(ToughSchedulingCasesPage, self).__init__(url=url,
                                                       page_set=page_set)

    def RunSmoothness(self, action_runner):
        # Wrap the scroll in a smooth-gesture interaction record so the
        # harness can attribute the frames to this action.
        scroll = action_runner.BeginGestureInteraction(
            'ScrollAction', is_smooth=True)
        action_runner.ScrollPage()
        scroll.End()
class Page1(ToughSchedulingCasesPage):

    """Why: Simulate an oversubscribed main thread."""

    def __init__(self, page_set):
        page_url = \
            'file://tough_scheduling_cases/simple_text_page.html?main_busy'
        super(Page1, self).__init__(url=page_url, page_set=page_set)
        # 8 ms of synthetic delay per main-thread frame.
        self.synthetic_delays = {
            'cc.BeginMainFrame': {'target_duration': 0.008},
        }
class Page2(ToughSchedulingCasesPage):

    """Why: Simulate an oversubscribed main thread."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'simple_text_page.html?main_very_busy')
        super(Page2, self).__init__(url=page_url, page_set=page_set)
        # 24 ms of synthetic delay per main-thread frame.
        self.synthetic_delays = {
            'cc.BeginMainFrame': {'target_duration': 0.024},
        }
class Page3(ToughSchedulingCasesPage):

    """Why: Simulate a page with a few graphics layers."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'simple_text_page.html?medium_layers')
        super(Page3, self).__init__(url=page_url, page_set=page_set)
        # 4 ms of synthetic delay in draw/swap, present and main frame.
        self.synthetic_delays = {
            'cc.DrawAndSwap': {'target_duration': 0.004},
            'gpu.PresentingFrame': {'target_duration': 0.004},
            'cc.BeginMainFrame': {'target_duration': 0.004},
        }
class Page4(ToughSchedulingCasesPage):

    """Why: Simulate a page with many graphics layers."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'simple_text_page.html?many_layers')
        super(Page4, self).__init__(url=page_url, page_set=page_set)
        # 12 ms of synthetic delay in draw/swap, present and main frame.
        self.synthetic_delays = {
            'cc.DrawAndSwap': {'target_duration': 0.012},
            'gpu.PresentingFrame': {'target_duration': 0.012},
            'cc.BeginMainFrame': {'target_duration': 0.012},
        }
class Page5(ToughSchedulingCasesPage):

    """Why: Simulate a page with expensive recording and rasterization."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'simple_text_page.html?medium_raster')
        super(Page5, self).__init__(url=page_url, page_set=page_set)
        # 4 ms of synthetic delay in raster, main frame and texture upload.
        self.synthetic_delays = {
            'cc.RasterRequiredForActivation': {'target_duration': 0.004},
            'cc.BeginMainFrame': {'target_duration': 0.004},
            'gpu.AsyncTexImage': {'target_duration': 0.004},
        }
class Page6(ToughSchedulingCasesPage):

    """Why: Simulate a page with expensive recording and rasterization."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'simple_text_page.html?heavy_raster')
        super(Page6, self).__init__(url=page_url, page_set=page_set)
        # 24 ms of synthetic delay in raster, main frame and texture upload.
        self.synthetic_delays = {
            'cc.RasterRequiredForActivation': {'target_duration': 0.024},
            'cc.BeginMainFrame': {'target_duration': 0.024},
            'gpu.AsyncTexImage': {'target_duration': 0.024},
        }
class Page7(ToughSchedulingCasesPage):

    """Why: Medium cost touch handler."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'touch_handler_scrolling.html?medium_handler')
        super(Page7, self).__init__(url=page_url, page_set=page_set)
        # 8 ms of synthetic delay per input event.
        self.synthetic_delays = {
            'blink.HandleInputEvent': {'target_duration': 0.008},
        }
class Page8(ToughSchedulingCasesPage):

    """Why: Slow touch handler."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'touch_handler_scrolling.html?slow_handler')
        super(Page8, self).__init__(url=page_url, page_set=page_set)
        # 24 ms of synthetic delay per input event.
        self.synthetic_delays = {
            'blink.HandleInputEvent': {'target_duration': 0.024},
        }
class Page9(ToughSchedulingCasesPage):

    """Why: Touch handler that often takes a long time."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'touch_handler_scrolling.html?janky_handler')
        super(Page9, self).__init__(url=page_url, page_set=page_set)
        # Alternate between fast and 24 ms-delayed input events.
        self.synthetic_delays = {
            'blink.HandleInputEvent': {'target_duration': 0.024,
                                       'mode': 'alternating'},
        }
class Page10(ToughSchedulingCasesPage):

    """Why: Touch handler that occasionally takes a long time."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/touch_handler_scrolling'
                    '.html?occasionally_janky_handler')
        super(Page10, self).__init__(url=page_url, page_set=page_set)
        # A single ("oneshot") 24 ms-delayed input event.
        self.synthetic_delays = {
            'blink.HandleInputEvent': {'target_duration': 0.024,
                                       'mode': 'oneshot'},
        }
class Page11(ToughSchedulingCasesPage):

    """Why: Super expensive touch handler causes browser to scroll after a
    timeout.
    """

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'touch_handler_scrolling.html?super_slow_handler')
        super(Page11, self).__init__(url=page_url, page_set=page_set)
        # 200 ms of synthetic delay per input event.
        self.synthetic_delays = {
            'blink.HandleInputEvent': {'target_duration': 0.2},
        }
class Page12(ToughSchedulingCasesPage):

    """Why: Super expensive touch handler that only occupies a part of the
    page.
    """

    def __init__(self, page_set):
        page_url = 'file://tough_scheduling_cases/div_touch_handler.html'
        super(Page12, self).__init__(url=page_url, page_set=page_set)
        # 200 ms of synthetic delay per input event.
        self.synthetic_delays = {
            'blink.HandleInputEvent': {'target_duration': 0.2},
        }
class Page13(ToughSchedulingCasesPage):

    """Why: Test a moderately heavy requestAnimationFrame handler."""

    def __init__(self, page_set):
        page_url = 'file://tough_scheduling_cases/raf.html?medium_handler'
        super(Page13, self).__init__(url=page_url, page_set=page_set)
        # 4 ms of synthetic delay in raster, main frame and texture upload.
        self.synthetic_delays = {
            'cc.RasterRequiredForActivation': {'target_duration': 0.004},
            'cc.BeginMainFrame': {'target_duration': 0.004},
            'gpu.AsyncTexImage': {'target_duration': 0.004},
        }
class Page14(ToughSchedulingCasesPage):

    """Why: Test a heavy requestAnimationFrame handler."""

    def __init__(self, page_set):
        page_url = 'file://tough_scheduling_cases/raf.html?heavy_handler'
        super(Page14, self).__init__(url=page_url, page_set=page_set)
        # 24 ms of synthetic delay in raster, main frame and texture upload.
        self.synthetic_delays = {
            'cc.RasterRequiredForActivation': {'target_duration': 0.024},
            'cc.BeginMainFrame': {'target_duration': 0.024},
            'gpu.AsyncTexImage': {'target_duration': 0.024},
        }
class Page15(ToughSchedulingCasesPage):

    """Why: Simulate a heavily GPU bound page."""

    def __init__(self, page_set):
        page_url = 'file://tough_scheduling_cases/raf.html?gpu_bound'
        super(Page15, self).__init__(url=page_url, page_set=page_set)
        # 100 ms of synthetic delay per presented frame.
        self.synthetic_delays = {
            'gpu.PresentingFrame': {'target_duration': 0.1},
        }
class Page16(ToughSchedulingCasesPage):

    """Why: Test a requestAnimationFrame handler with a heavy first
    frame."""

    def __init__(self, page_set):
        page_url = 'file://tough_scheduling_cases/raf.html?heavy_first_frame'
        super(Page16, self).__init__(url=page_url, page_set=page_set)
        # 150 ms of synthetic delay on the main frame.
        self.synthetic_delays = {
            'cc.BeginMainFrame': {'target_duration': 0.15},
        }
class Page17(ToughSchedulingCasesPage):

    """Why: Medium stress test for the scheduler."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'raf_touch_animation.html?medium')
        super(Page17, self).__init__(url=page_url, page_set=page_set)
        # 4 ms of synthetic delay in draw/swap and main frame.
        self.synthetic_delays = {
            'cc.DrawAndSwap': {'target_duration': 0.004},
            'cc.BeginMainFrame': {'target_duration': 0.004},
        }
class Page18(ToughSchedulingCasesPage):

    """Why: Heavy stress test for the scheduler."""

    def __init__(self, page_set):
        page_url = ('file://tough_scheduling_cases/'
                    'raf_touch_animation.html?heavy')
        super(Page18, self).__init__(url=page_url, page_set=page_set)
        # 12 ms of synthetic delay in draw/swap and main frame.
        self.synthetic_delays = {
            'cc.DrawAndSwap': {'target_duration': 0.012},
            'cc.BeginMainFrame': {'target_duration': 0.012},
        }
class Page19(ToughSchedulingCasesPage):

    """Why: Both main and impl thread animating concurrently."""

    def __init__(self, page_set):
        page_url = 'file://tough_scheduling_cases/split_animation.html'
        super(Page19, self).__init__(url=page_url, page_set=page_set)

    def RunSmoothness(self, action_runner):
        # No gesture here: just let the page's own animations run.
        action_runner.Wait(3)
class Page20(ToughSchedulingCasesPage):

    """Why: Simple JS touch dragging."""

    def __init__(self, page_set):
        page_url = 'file://tough_scheduling_cases/simple_touch_drag.html'
        super(Page20, self).__init__(url=page_url, page_set=page_set)

    def RunSmoothness(self, action_runner):
        drag = action_runner.BeginGestureInteraction(
            'ScrollAction', is_smooth=True)
        # Slowly drag the #card element upwards with a touch gesture.
        action_runner.ScrollElement(
            selector='#card',
            direction='up',
            use_touch=True,
            distance=400,
            speed_in_pixels_per_second=150)
        drag.End()
class EmptyTouchHandlerPage(ToughSchedulingCasesPage):

    """Why: Scrolling on a page with a touch handler that consumes no
    events but may be slow."""

    def __init__(self, name, desktop, slow_handler, bounce, page_set):
        page_url = (
            'file://tough_scheduling_cases/empty_touch_handler%s.html?%s'
            % ('_desktop' if desktop else '', name))
        super(EmptyTouchHandlerPage, self).__init__(url=page_url,
                                                    page_set=page_set)
        if slow_handler:
            # 200 ms of synthetic delay per input event.
            self.synthetic_delays = {
                'blink.HandleInputEvent': {'target_duration': 0.2}
            }
        self.bounce = bounce

    def RunSmoothness(self, action_runner):
        if not self.bounce:
            scroll = action_runner.BeginGestureInteraction(
                'ScrollAction', is_smooth=True)
            # Speed and distance are tuned to run exactly as long as a
            # scroll bounce.
            action_runner.ScrollPage(use_touch=True,
                                     speed_in_pixels_per_second=400,
                                     distance=2100)
            scroll.End()
            return
        bounce_gesture = action_runner.BeginGestureInteraction(
            'ScrollBounceAction', is_smooth=True)
        action_runner.ScrollBouncePage()
        bounce_gesture.End()
class SynchronizedScrollOffsetPage(ToughSchedulingCasesPage):

    """Why: For measuring the latency of scroll-synchronized effects."""

    def __init__(self, page_set):
        page_url = 'file://tough_scheduling_cases/sync_scroll_offset.html'
        super(SynchronizedScrollOffsetPage, self).__init__(
            url=page_url, page_set=page_set)

    def RunSmoothness(self, action_runner):
        bounce_gesture = action_runner.BeginGestureInteraction(
            'ScrollBounceAction', is_smooth=True)
        action_runner.ScrollBouncePage()
        bounce_gesture.End()
class ToughSchedulingCasesPageSet(page_set_module.PageSet):

    """Tough scheduler latency test cases."""

    def __init__(self):
        super(ToughSchedulingCasesPageSet, self).__init__()

        def add_baseline(url):
            # Baseline pages use the plain base-class smooth scroll.
            self.AddPage(ToughSchedulingCasesPage(url, self))

        # Why: Simple scrolling baseline
        add_baseline('file://tough_scheduling_cases/simple_text_page.html')
        for page_class in (Page1, Page2, Page3, Page4, Page5):
            self.AddPage(page_class(self))
        # Page6 deliberately not added: flaky, crbug.com/368532
        # Why: Touch handler scrolling baseline
        add_baseline(
            'file://tough_scheduling_cases/touch_handler_scrolling.html')
        for page_class in (Page7, Page8, Page9, Page10, Page11, Page12):
            self.AddPage(page_class(self))
        # Why: requestAnimationFrame scrolling baseline
        add_baseline('file://tough_scheduling_cases/raf.html')
        # Why: Test canvas blocking behavior
        add_baseline('file://tough_scheduling_cases/raf_canvas.html')
        self.AddPage(Page13(self))
        # Page14 deliberately not added: flaky, see 368532
        self.AddPage(Page15(self))
        self.AddPage(Page16(self))
        # Why: Test a requestAnimationFrame handler with concurrent CSS
        # animation
        add_baseline('file://tough_scheduling_cases/raf_animation.html')
        # Why: Stress test for the scheduler
        add_baseline('file://tough_scheduling_cases/raf_touch_animation.html')
        for page_class in (Page17, Page18, Page19, Page20):
            self.AddPage(page_class(self))

        # No-op touch handler variants: (name, desktop, slow_handler, bounce)
        empty_handler_cases = [
            # Why: Baseline for scrolling in the presence of a no-op touch
            # handler
            ('baseline', False, False, False),
            # Why: Slow handler blocks scroll start
            ('slow_handler', False, True, False),
            # Why: Slow handler blocks scroll start until touch ACK timeout
            ('desktop_slow_handler', True, True, False),
            # Why: Scroll bounce showing repeated transitions between
            # scrolling and sending synchronous touchmove events. Should be
            # nearly as fast as scroll baseline.
            ('bounce', False, False, True),
            # Why: Scroll bounce with slow handler, repeated blocking.
            ('bounce_slow_handler', False, True, True),
            # Why: Scroll bounce with slow handler on desktop, blocks only
            # once until ACK timeout.
            ('bounce_desktop_slow_handler', True, True, True),
        ]
        for name, desktop, slow_handler, bounce in empty_handler_cases:
            self.AddPage(EmptyTouchHandlerPage(
                name=name,
                desktop=desktop,
                slow_handler=slow_handler,
                bounce=bounce,
                page_set=self))

        # Why: For measuring the latency of scroll-synchronized effects.
        self.AddPage(SynchronizedScrollOffsetPage(page_set=self))
| {
"content_hash": "5abb30b827f2c3eb673cdfb1402d9654",
"timestamp": "",
"source": "github",
"line_count": 465,
"max_line_length": 98,
"avg_line_length": 30.830107526881722,
"alnum_prop": 0.6605747767857143,
"repo_name": "chromium2014/src",
"id": "89b3e92fab039c5d4623eae93b104e2ab063e78f",
"size": "14528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/perf/page_sets/tough_scheduling_cases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "1889381"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "39993418"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "220757674"
},
{
"name": "CSS",
"bytes": "973910"
},
{
"name": "Java",
"bytes": "6583410"
},
{
"name": "JavaScript",
"bytes": "20967999"
},
{
"name": "Mercury",
"bytes": "9480"
},
{
"name": "Objective-C",
"bytes": "943237"
},
{
"name": "Objective-C++",
"bytes": "7190130"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "674461"
},
{
"name": "Python",
"bytes": "10430892"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1337040"
},
{
"name": "Standard ML",
"bytes": "3705"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
} |
"""
A set of functions to match sources in two input catalogs
Top-level functions
-------------------
find_match - finds matches between two catalogs, based on WCS coordinates
color_mag - given catalogs matched by find_match, computes colors for
matched objects
find_zp - essentially a focused version of color_mag. Uses the magnitude
differences between one catalog and a photometric catalog to
determine the zero-point needed for the first catalog
write_matchcat - writes an output catalog in the format of the output that
is produced by catcomb.c
"""
import numpy as n
from math import sqrt
from matplotlib import pyplot as plt
from astropy.io import ascii
from astropy.table import Table
try:
from CDFutils import coords
except ImportError:
import coords
#------------------------------------------------------------------------------
#class matchcat:
#
# """
#  A class containing matching methods
# DON'T USE - JUST A POSSIBLE WAY FORWARD THAT HAS NOT BEEN DEVELOPED
# """
#
# def __init__(self, catfile1, catfile2):
# """
# Sets up the matchcat container
# """
#
# self.catfile1 = catfile1
# self.catfile2 = catfile2
#
# """ TO BE CONTINUED """
#-----------------------------------------------------------------------------
def match_coords(ra1, dec1, ra2, dec2, rmatch, dra2=0., ddec2=0., doplot=True):
"""
***** NOTE: Need to switch the distance calculation to a call to
astropy
The main function to match coordinates (may be called by matchcat function).
NOTE: the matchcat function below provides a simple input for match_coords
if the two sets of input coordinates are contained in Secat containers.
See find_match for how to create such Secat containers from input files.
Inputs:
ra1 - RA (decimal degrees) for first catalog
dec1 - Dec (decimal degrees) for first catalog
ra2 - RA (decimal degrees) for second catalog
dec2 - Dec (decimal degrees) for second catalog
rmatch - max distance for a valid match (arcsec)
dra2 - optional offset in ARCSEC to apply to ra2, if there is a known
offset between the catalogs (default=0.0)
ddec2 - optional offset in ARCSEC to apply to dec2, if there is a known
offset between the catalogs (default=0.0)
"""
print ""
print "Catalog info"
print "--------------------------------------------"
print " Catalog 1: %d coordinates" % len(ra1)
print " Catalog 2: %d coordinates" % len(ra2)
""" Initialize containers for output information """
nmatch = n.zeros(len(ra1),dtype=int)
dxmatch = n.zeros(len(ra1))
dymatch = n.zeros(len(ra1))
ramatch = n.zeros(len(ra1))
decmatch = n.zeros(len(ra1))
indmatch = n.ones(len(ra1),dtype=int) * -1
""" Correct for known shifts """
#dec2 += 1.39e-4 temporary kludge for fixing Cl1604 matches
ra2 = ra2.copy() + dra2/(3600.*n.cos(dec2))
dec2 = dec2.copy() + ddec2/3600.
""" Loop over catalog1 """
print ""
print "Searching for matches within %5.2f arcsec" % rmatch
print "-----------------------------------------"
for i in range(len(ra1)):
dx,dy = coords.sky_to_darcsec(ra1[i],dec1[i],ra2,dec2)
dpos = n.sqrt(dx**2 + dy**2)
isort = n.argsort(dpos)
if dpos[isort[0]]<=rmatch:
dxmatch[i] = dx[isort[0]]
dymatch[i] = dy[isort[0]]
ramatch[i] = ra1[i]
decmatch[i] = dec1[i]
nmatch[i] = dpos[dpos<=rmatch].size
indmatch[i] = isort[0]
del dx,dy,dpos
print " Sources in catalog 1 with matches in catalog 2: %d" % \
(nmatch>0).sum()
mdx = dxmatch[nmatch>0]
mdy = dymatch[nmatch>0]
mra = ramatch[nmatch>0]
mdec = decmatch[nmatch>0]
mdx0 = n.median(mdx)
mdy0 = n.median(mdy)
print " Median offset for matches (RA): %+6.2f arcsec" % mdx0
print " Median offset for matches (Dec): %+6.2f arcsec" % mdy0
""" Plot up some offsets, if desired """
if doplot:
plt.figure(1)
#
ax1 = plt.subplot(221)
plt.scatter(mra,mdy)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.ylabel(r'$\Delta \delta$ (arcsec)')
plt.axhline(0.0,color='r')
#
ax2 = plt.subplot(223, sharex=ax1)
plt.scatter(mra,mdx)
plt.xlabel(r'$\alpha$')
plt.ylabel(r'$\Delta \alpha$ (arcsec)')
plt.axhline(0.0,color='r')
#
ax3 = plt.subplot(222, sharey=ax1)
plt.scatter(mdec,mdy)
plt.axhline(0.0,color='r')
plt.setp(ax3.get_xticklabels(), visible=False)
plt.setp(ax3.get_yticklabels(), visible=False)
#
ax4 = plt.subplot(224)
plt.scatter(mdec,mdx)
plt.xlabel(r'$\delta$')
plt.axhline(0.0,color='r')
plt.setp(ax4.get_yticklabels(), visible=False)
#--------------------------------------------------
plt.figure(2)
plt.scatter(mdx,mdy)
plt.axis('scaled')
plt.xlabel(r'$\Delta \alpha$ (arcsec)')
plt.ylabel(r'$\Delta \delta$ (arcsec)')
plt.title('Offsets between matched sources (rmatch = %5.2f)' % rmatch)
plt.axvline(0.0,color='r')
plt.axhline(0.0,color='r')
plt.plot(n.array([mdx0]),n.array([mdy0]),'r*',ms=20)
plt.xlim(-1.1*rmatch,1.1*rmatch)
plt.ylim(-1.1*rmatch,1.1*rmatch)
plt.show()
""" Clean up """
#del ra1,dec1,ra2,dec2
del ramatch,decmatch
return indmatch,nmatch,dxmatch,dymatch
#------------------------------------------------------------------------------
def match_xy(x1, y1, x2, y2, rmatch, dx2=0., dy2=0., doplot=True):
    """
    The main function to match pixel coordinates.  For each source in
    catalog 1, the closest source in catalog 2 is found; the pair is
    recorded as a match if its separation is no more than rmatch pixels.

    Inputs:
      x1     - x coordinate in first catalog
      y1     - y coordinate in first catalog
      x2     - x coordinate in second catalog
      y2     - y coordinate in second catalog
      rmatch - max distance for a valid match (pixels)
      dx2    - optional offset to apply to x2, if there is a known offset
               between the catalogs (default=0.0)
      dy2    - optional offset to apply to y2, if there is a known offset
               between the catalogs (default=0.0)
      doplot - if True (the default), plot the offsets of the matches

    Outputs:
      indmatch - for each catalog-1 source, the index of the best catalog-2
                 match, or -1 if there is no match within rmatch
      nmatch   - for each catalog-1 source, the number of catalog-2 sources
                 within rmatch
      dxmatch  - x offset (pixels) of the best match (0 if unmatched)
      dymatch  - y offset (pixels) of the best match (0 if unmatched)
    """

    print ""
    print "Catalog info"
    print "--------------------------------------------"
    print " Catalog 1: %d coordinates" % x1.size
    print " Catalog 2: %d coordinates" % x2.size

    """ Initialize containers for output information """
    # indmatch stays at -1 for catalog-1 sources without a match
    nmatch = n.zeros(x1.size,dtype=int)
    dxmatch = n.zeros(x1.size)
    dymatch = n.zeros(x1.size)
    xmatch = n.zeros(x1.size)
    ymatch = n.zeros(x1.size)
    indmatch = n.ones(x1.size,dtype=int) * -1

    """ Correct for known offsets """
    # Copies avoid mutating the caller's arrays
    x2 = x2.copy() + dx2
    y2 = y2.copy() + dy2

    """ Loop over catalog1 """
    print ""
    print "Searching for matches..."
    print "------------------------------"
    for i in range(x1.size):
        dx = x2 - x1[i]
        dy = y2 - y1[i]
        dpos = n.sqrt(dx**2 + dy**2)
        # The closest catalog-2 source is the candidate match
        isort = n.argsort(dpos)
        if dpos[isort[0]]<=rmatch:
            dxmatch[i] = dx[isort[0]]
            dymatch[i] = dy[isort[0]]
            xmatch[i] = x1[i]
            ymatch[i] = y1[i]
            # nmatch counts ALL catalog-2 sources within rmatch, not just
            # the recorded closest one
            nmatch[i] = dpos[dpos<=rmatch].size
            indmatch[i] = isort[0]
        del dx,dy,dpos
    print " Sources in catalog 1 with matches in catalog 2: %d" % \
        (nmatch>0).sum()
    # NOTE(review): if no sources matched, the medians below operate on
    # empty arrays and return nan with a RuntimeWarning -- confirm intended
    mdx = dxmatch[nmatch>0]
    mdy = dymatch[nmatch>0]
    mx = xmatch[nmatch>0]
    my = ymatch[nmatch>0]
    print " Median offset for matches (X): %+6.2f pixels" % n.median(mdx)
    print " Median offset for matches (Y): %+6.2f pixels" % n.median(mdy)

    """ Plot up some offsets, if desired """
    if doplot:
        # Figure 1: scatter of the match offsets about (0,0)
        plt.figure(1)
        plt.scatter(mdx,mdy)
        plt.xlabel(r'$\Delta x$ (pixels)')
        plt.ylabel(r'$\Delta y$ (pixels)')
        plt.title('Offsets between matched sources (rmatch = %5.2f)' % rmatch)
        plt.axvline(0.0,color='r')
        plt.axhline(0.0,color='r')
        plt.xlim(-1.1*rmatch,1.1*rmatch)
        plt.ylim(-1.1*rmatch,1.1*rmatch)
        # Figure 2: offsets as a function of position in the image
        # NOTE(review): no plt.show() here (unlike match_coords) -- the
        # caller appears responsible for displaying the figures
        plt.figure(2)
        plt.subplot(221)
        plt.scatter(mx,mdy)
        plt.ylabel(r'$\Delta y$ (pixels)')
        plt.axhline(0.0,color='r')
        plt.subplot(223)
        plt.scatter(mx,mdx)
        plt.xlabel('x')
        plt.ylabel(r'$\Delta x$ (pixels)')
        plt.axhline(0.0,color='r')
        plt.subplot(222)
        plt.scatter(my,mdy)
        plt.ylabel(r'$\Delta y$ (pixels)')
        plt.axhline(0.0,color='r')
        plt.subplot(224)
        plt.scatter(my,mdx)
        plt.xlabel('y')
        plt.ylabel('$\Delta x$ (pixels)')
        plt.axhline(0.0,color='r')

    """ Clean up """
    del x1,y1,x2,y2
    del xmatch,ymatch
    return indmatch,nmatch,dxmatch,dymatch
#-----------------------------------------------------------------------
def matchcat(cat1, cat2, rmatch, dra2=0., ddec2=0., doplot=True):
    """
    Does a match on two Secat catalogs that have already been read in from
    the appropriate files, or created in some other way.  This function
    stores its results on the input catalogs themselves: cat1 gets
    indmatch / nmatch / matchdx / matchdy arrays plus a boolean mask of the
    matched rows, and cat2.mask receives the corresponding catalog-2
    indices (integer indices, not a boolean mask).

    Inputs:
      cat1   - first Secat catalog
      cat2   - second Secat catalog
      rmatch - max distance for a valid match (arcsec)
      dra2   - optional RA offset in ARCSEC to apply to cat2 (default=0.0)
      ddec2  - optional Dec offset in ARCSEC to apply to cat2 (default=0.0)
      doplot - if True (the default), plot the offsets of the matches
    """

    """ Make sure that each of the input catalogs has radec coordinates """
    if cat1.radec is None:
        cat1.get_radec()
    if cat2.radec is None:
        cat2.get_radec()
    if cat1.radec is None or cat2.radec is None:
        # Bail out (returning None) if either catalog lacks RA,Dec info
        print ''
        print 'ERROR. Cannot match catalogs since RA,Dec information was not'
        print ' available in the expected place.'
        print ''
        return

    """ Do the matching """
    cat1.indmatch, cat1.nmatch, cat1.matchdx, cat1.matchdy = \
        match_coords(cat1.ra, cat1.dec, cat2.ra, cat2.dec, \
                     rmatch, dra2, ddec2, doplot)

    # cat1.mask flags catalog-1 rows that have a match; cat2.mask holds
    # the matching catalog-2 row indices in the same order
    cat1.mask = cat1.indmatch > -1
    cat2.mask = cat1.indmatch[cat1.mask]
#-----------------------------------------------------------------------
def find_match(catfile1, catfile2, rmatch, catformat1='ascii',
               catformat2='ascii',
               racol1=None, deccol1=None, racol2=None, deccol2=None,
               rafield1=None, decfield1=None, rafield2=None, decfield2=None,
               namecol1=None, namecol2=None, dra2=0., ddec2=0., doplot=True):
    """
    The main function to match catalogs contained in two input files.  The
    input files are expected (for now) to have RA and Dec in decimal degrees

    Inputs:
      catfile1   - file containing the first catalog
      catfile2   - file containing the second catalog
      rmatch     - max distance for a valid match
      catformat1 - Format for first catalog file: 'ascii' (default) or 'ldac'
      catformat2 - Format for second catalog file: 'ascii' (default) or 'ldac'
      racol1     - column containing RA in the first file
      deccol1    - column containing Dec in the first file
      racol2     - column containing RA in the second file
      deccol2    - column containing Dec in the second file
      rafield1   - field name for RA in the first file (if fields are named)
      decfield1  - field name for Dec in the first file
      rafield2   - field name for RA in the second file
      decfield2  - field name for Dec in the second file
      namecol1   - column containing the object name in the first file
      namecol2   - column containing the object name in the second file
      dra2       - Offset in ARCSEC to add to ra2 in case of known shifts
                   between cats
      ddec2      - Offset in ARCSEC to add to dec2 in case of known shifts
                   between cats
      doplot     - set to True to plot the match offsets

    Outputs: two catalogs that contain the input catalogs plus information
             about any matched objects, or None if either file is unreadable
    """

    """ Read inputs """
    import catfuncs
    try:
        cat1 = catfuncs.Secat(catfile1, catformat=catformat1, racol=racol1,
                              deccol=deccol1, rafield=rafield1,
                              decfield=decfield1, namecol=namecol1)
        cat1.get_radec()
    except Exception:
        # Bug fix: was a bare 'except:', which also swallowed
        # KeyboardInterrupt / SystemExit
        print("")
        print("ERROR: Could not read RA and Dec from %s" % catfile1)
        return
    try:
        cat2 = catfuncs.Secat(catfile2, catformat=catformat2, racol=racol2,
                              deccol=deccol2, rafield=rafield2,
                              decfield=decfield2, namecol=namecol2)
        cat2.get_radec()
    except Exception:
        print("")
        print("ERROR: Could not read RA and Dec from %s" % catfile2)
        return
    print("")

    """ Do the matching (dead commented-out duplicate of matchcat removed) """
    matchcat(cat1, cat2, rmatch, dra2=dra2, ddec2=ddec2, doplot=doplot)
    return cat1, cat2
#--------------------------------------------------------------------------
def color_mag(cat1, cat2, magcol1, magcol2, lab1='mag1', lab2='mag2',
              coloraxis='y', savematch=True, starsonly=False, doplot=True):
    """
    Given catalogs that have been matched by the find_match function,
    calculate the colors for the matched objects

    Inputs:
      cat1      - a Secat catalog produced by find_match
      cat2      - a Secat catalog produced by find_match
      magcol1   - string describing the column containing the magnitude in
                  the first catalog
      magcol2   - string describing the column containing the magnitude in
                  the second catalog
      lab1      - label for the first magnitude, e.g. 'B'. Default='mag1'
      lab2      - label for the first magnitude, e.g. 'V'. Default='mag2'
      coloraxis - axis on which to plot the color.
                  Set this to 'x' for a HR diagram. Default='y'
      savematch - Set this to True (the default) to return the matched
                  magnitude vectors
      starsonly - when this parameter is set to true, then use the
                  starmask masks from each of the input catalogs to
                  compare only stars.
                  NOTE: this means that the set_starmask method has to have
                  been run for each of the input catalogs.
                  Default=False
      doplot    - set to True to make a plot. Default=True

    Returns (mag1, mag2) for the retained matches if savematch is True,
    otherwise None.
    """

    """ Compute the color """
    if starsonly:
        # NOTE(review): data is indexed with starmask[mask], a boolean array
        # whose length is the number of *matched* sources, not the full
        # catalog length -- confirm that starmask is defined on the matched
        # subset; otherwise this should read cat1.data[cat1.mask][sm1].
        sm1 = cat1.starmask[cat1.mask]
        sm2 = cat2.starmask[cat2.mask]
        mag1 = cat1.data[sm1][magcol1]
        mag2 = cat2.data[sm2][magcol2]
    else:
        mag1 = cat1.data[cat1.mask][magcol1]
        mag2 = cat2.data[cat2.mask][magcol2]

    """ Get rid of crazy points (sentinel/garbage magnitudes) """
    mask = (mag1 < 35.) & (mag2 < 35.) & (mag1 > 5) & (mag2 > 5)
    mag1 = mag1[mask]
    mag2 = mag2[mask]
    magdiff = mag1 - mag2

    """ Plot the results if desired """
    if doplot:
        if coloraxis == 'x':
            plt.plot(magdiff, mag2, 'bo')
            plt.xlabel('%s - %s (mag)' % (lab1, lab2))
            plt.ylabel('%s (mag)' % lab2)
        else:
            plt.plot(mag2, magdiff, 'bo')
            plt.xlabel('%s (mag)' % lab2)
            plt.ylabel('%s - %s (mag)' % (lab1, lab2))

    """ Return the matched magnitudes if requested """
    # (The original del statements were pointless at function exit and have
    # been removed.)
    if savematch:
        return mag1, mag2
#--------------------------------------------------------------------------
def _print_diff_stats(diffs, title):
    """Print summary statistics (range, mean, median, sigma, sigma_mu) for a
    vector of magnitude differences, preceded by an underlined title."""
    print('')
    print(title)
    print('-' * len(title))
    print('N_sources %d' % diffs.size)
    print('Range:    %6.3f to %6.3f' % (diffs.min(), diffs.max()))
    print('Mean:     %6.3f' % diffs.mean())
    print('Median:   %6.3f' % n.median(diffs))
    print('Sigma:    %6.3f' % diffs.std())
    print('Sigma_mu: %6.3f' % (diffs.std() / sqrt(diffs.size)))


def find_zp(datacat, photcat, magcol1, magcol2, lab1='mag_data',
            lab2='mag_phot', magmin=12., magmax=23., diffmin=-5., diffmax=5.,
            doplot=True, startfig=3, starsonly=False):
    """
    Given an input catalog and a photometric catalog that have been matched by
    position, find the magnitude offset between the matched objects.  This can
    be used to find the zero point for the first catalog.
    This is essentially a specialized version of color_mag.

    Inputs:
      datacat   - a Secat catalog produced by find_match
      photcat   - a Secat catalog produced by find_match
      magcol1   - string describing the column containing the magnitude in
                  the data catalog
      magcol2   - string describing the column containing the magnitude in
                  the photometric catalog
      lab1      - label for the first magnitude. Default='mag_data'
      lab2      - label for the first magnitude. Default='mag_phot'
      magmin    - minimum magnitude for "good" data points
      magmax    - maximum magnitude for "good" data points
      diffmin   - minimum magnitude difference for "good" data points
      diffmax   - minimum magnitude difference for "good" data points
      doplot    - set to True to make a plot. Default=True
      startfig  - two figures will be produced if doplot==True. This
                  parameter sets the figure number for the first one
      starsonly - when this parameter is set to true, then use the
                  starmask masks from each of the input catalogs to
                  compare only stars.
                  NOTE: this means that the set_starmask method has to have
                  been run for each of the input catalogs
    """

    """ Get the magnitude matches and plot them """
    if doplot:
        plt.figure(startfig)
    mdat, mphot = color_mag(datacat, photcat, magcol1, magcol2, lab1, lab2,
                            doplot=doplot, starsonly=starsonly)
    mdiff = mdat - mphot
    if doplot:
        # Bug fix: these plotting calls were previously executed even when
        # doplot=False, drawing into whatever figure happened to be current.
        plt.axhline(0., color='r', lw=2, ls='--')
        if mphot.max() > 35.:
            plt.xlim(10., 27.)
        if mdiff.max() > 20.:
            plt.ylim(-5., 5.)
        # Mark the good-data cuts if they fall inside the plot limits
        if magmin > plt.xlim()[0] and magmin < plt.xlim()[1]:
            plt.axvline(magmin, color='r', lw=2)
        if magmax > plt.xlim()[0] and magmax < plt.xlim()[1]:
            plt.axvline(magmax, color='r', lw=2)
        if diffmin > plt.ylim()[0] and diffmin < plt.ylim()[1]:
            plt.axhline(diffmin, color='r', lw=2)
        if diffmax > plt.ylim()[0] and diffmax < plt.ylim()[1]:
            plt.axhline(diffmax, color='r', lw=2)

    """ Get magnitude difference info """
    _print_diff_stats(mdiff,
                      'Statistics on magnitude differences (all points)')
    mask = (mphot > magmin) & (mphot < magmax) & \
        (mdiff > diffmin) & (mdiff < diffmax)
    diffgood = mdiff[mask]
    if doplot:
        plt.figure(startfig + 1)
        plt.hist(diffgood, bins=20)
    _print_diff_stats(diffgood,
                      'Statistics on magnitude differences (good sources)')
    print('')
#--------------------------------------------------------------------------
def write_matchcat(cat1, cat2, outfile, rmatch, c1fluxcol, c2fluxcol):
    """
    Writes an output file in the format of the file produced by catcomb.c.
    *** NOTE *** The catalog matching (with the find_match function in
    matchcat.py) has to have been run before running this code.

    Inputs:
      cat1      - first catalog used in the matching
      cat2      - second catalog used in the matching (needs to be fixed
                  for more)
      outfile   - output file
      rmatch    - match radius used for the matching - used only to put info
                  into the output file.
      c1fluxcol - number of the flux column ('fN') in the first catalog
      c2fluxcol - number of the flux column ('fN') in the second catalog

    (Cleanup: removed several locals that were computed but never used, and
    the output file is now managed with a context manager.)
    """

    """ Get info on the matched objects """
    c1id = n.arange(1, cat1.data.size + 1)
    c1mi = cat1.indmatch.copy()

    """ Write match info to output file """
    #
    # Need to fix format here to match matchcat.c
    #
    with open(outfile, 'w') as ofile:
        for i in range(cat1.ra.size):
            c1flux = cat1.data[i]['f%d' % c1fluxcol]
            if c1mi[i] > -1:
                # Matched source: pull index, flux and offset from cat2
                c2dat = cat2.data[c1mi[i]]
                ind2 = c2dat['f0']
                flux2 = c2dat['f%d' % c2fluxcol]
                dpos = n.sqrt(cat1.matchdx[i]**2 + cat1.matchdy[i]**2)
            else:
                # Unmatched source: write zero placeholders
                ind2 = 0
                flux2 = 0.
                dpos = 0.
            ofile.write('%05d %11.7f %+11.7f 0.00 0.00 %5d 0.00 %7.4f ' %
                        (c1id[i], cat1.ra[i], cat1.dec[i], c1id[i], c1flux))
            ofile.write('%5d %8.2f %6.2f' % (ind2, dpos, flux2))
            ofile.write('\n')
| {
"content_hash": "08bae467f146864526d7141e1ee83c42",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 81,
"avg_line_length": 36.47387387387387,
"alnum_prop": 0.5764955787185694,
"repo_name": "cdfassnacht/CodeCDF",
"id": "c2a501771de3acc0ce1ac89c8dc34026122696af",
"size": "20243",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/matchcat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1977044"
},
{
"name": "DIGITAL Command Language",
"bytes": "1074"
},
{
"name": "Dockerfile",
"bytes": "836"
},
{
"name": "IDL",
"bytes": "3470777"
},
{
"name": "Perl",
"bytes": "6217"
},
{
"name": "Prolog",
"bytes": "280792"
},
{
"name": "Python",
"bytes": "341000"
},
{
"name": "Roff",
"bytes": "15608"
},
{
"name": "Shell",
"bytes": "82802"
},
{
"name": "TeX",
"bytes": "159505"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function
import sys
from annoy import AnnoyIndex
import h5py
import progressbar as pb
import argparse
# Command line: source HDF5 file, dataset name within it, output index path,
# and the number of random-projection trees to build (more trees = better
# accuracy, larger index).
parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('dataset')
parser.add_argument('out_file')
parser.add_argument('--trees', type=int, default=256)
args = parser.parse_args()
with h5py.File(args.filename, 'r') as f:
    X = f[args.dataset]
    # Index dimensionality comes from the dataset's feature axis.
    idx = AnnoyIndex(X.shape[1], 'angular')
    print("Adding items...", file=sys.stderr, end='')
    # Adding the highest item id first makes Annoy allocate the full item
    # array once instead of growing it incrementally.
    idx.add_item(X.shape[0] - 1, X[-1]) # preallocate full size
    for i, v in enumerate(pb.ProgressBar()(X[:-1])):
        idx.add_item(i, v)
    print("done.", file=sys.stderr)
    print("Building trees...", file=sys.stderr, end='')
    idx.build(args.trees)
    print("done.", file=sys.stderr)
print("Saving index...", file=sys.stderr, end='')
idx.save(args.out_file)
print("done.", file=sys.stderr)
| {
"content_hash": "2810879e473954760f5e491e71e64570",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 64,
"avg_line_length": 29.451612903225808,
"alnum_prop": 0.683461117196057,
"repo_name": "benbo/XDATAYemen",
"id": "128e282ad31b58a1371217342d06eca683cfb4a6",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build_annoy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3155"
},
{
"name": "Jupyter Notebook",
"bytes": "232005"
},
{
"name": "Python",
"bytes": "15670"
}
],
"symlink_target": ""
} |
import bpy
from bpy.props import BoolProperty
from mathutils import Vector, Matrix
class PRMAN_OT_Renderman_mesh_reference_pose(bpy.types.Operator):
    """Operator that snapshots the active mesh's current-frame points and
    normals as the RenderMan reference pose (__Pref/__WPref/__Nref/__WNref)."""
    bl_idname = 'mesh.freeze_reference_pose'
    bl_label = "Freeze Reference Pose"
    bl_description = "Use the mesh's points and normals for the current frame as the reference pose. This essentially adds the __Pref, __NPref, __Nref and __WNref primitive variables."
    # Dialog options; Blender interprets these annotation-style declarations
    # as registered RNA properties, so they must stay as annotations.
    add_Pref: BoolProperty(name='Add __Pref', default=True)
    add_WPref: BoolProperty(name='Add __WPref', default=True)
    add_Nref: BoolProperty(name='Add __Nref', default=True)
    add_WNref: BoolProperty(name='Add __WNref', default=True)
    def execute(self, context):
        # Rebuild the stored reference pose from scratch, one entry per
        # vertex, recording object-space and (optionally) world-space data.
        mesh = context.mesh
        ob = context.object
        rm = mesh.renderman
        rm.reference_pose.clear()
        matrix_world = ob.matrix_world
        mesh.calc_normals_split()
        for mv in mesh.vertices:
            rp = rm.reference_pose.add()
            if self.add_Pref:
                rp.has_Pref = True
                rp.rman__Pref = mv.co
            if self.add_WPref:
                rp.has_WPref = True
                v = Vector(mv.co)
                v = matrix_world @ v
                rp.rman__WPref = v
            if self.add_Nref:
                rp.has_Nref = True
                rp.rman__Nref = mv.normal
            if self.add_WNref:
                rp.has_WNref = True
                n = Vector(mv.normal)
                # NOTE(review): transforming a normal by the full object
                # matrix is only correct for rigid transforms; non-uniform
                # scaling would require the inverse-transpose -- confirm.
                n = matrix_world @ n
                rp.rman__WNref = n
        mesh.free_normals_split()
        # Flag the mesh data as changed so dependent render data refreshes.
        ob.update_tag(refresh={'DATA'})
        return {'FINISHED'}
    def invoke(self, context, event=None):
        # Show a confirmation dialog exposing the four Add __*ref toggles.
        wm = context.window_manager
        return wm.invoke_props_dialog(self)
# Operator classes that register()/unregister() below install into Blender.
classes = [
    PRMAN_OT_Renderman_mesh_reference_pose
]
def register():
    """Register this module's operator classes with Blender."""
    for operator_cls in classes:
        bpy.utils.register_class(operator_cls)
def unregister():
    """Unregister this module's operator classes, ignoring classes that are
    already unregistered.

    Bug fix: the original except branch called rfb_log(), which is never
    imported in this module, so a RuntimeError during unregistration turned
    into a NameError.  Use the stdlib logging module instead, preserving the
    original debug-level message.
    """
    import logging
    for cls in classes:
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            logging.getLogger(__name__).debug(
                'Could not unregister class: %s', cls)
| {
"content_hash": "b41096a4ba68f14778f7c7e3ce7235ee",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 184,
"avg_line_length": 31.544117647058822,
"alnum_prop": 0.5673659673659673,
"repo_name": "adminradio/RenderManForBlender",
"id": "3863862d924716c139092e21757525d2b2ee5df2",
"size": "2145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rman_operators/rman_operators_mesh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1055354"
}
],
"symlink_target": ""
} |
import numpy as np
from pyraytracer import mieGenerator as mie
from pyraytracer import scatteringTools as sc
from scipy.stats import lognorm
if __name__ == '__main__':
    # Wavelengths (nm) and particle sizes (um)
    w = np.linspace(100, 1100, 501)
    p = np.linspace(3, 35, 50)
    # Log norm parameters from D10, D50 and D90 parameters
    # (percentiles of the cumulative particle-size distribution, in um)
    particle_mu, particle_sigma = sc.fitLogNormParticleDistribution(
        D10=7.3, D50=12.0, D90=18.3)
    print('mu: %.6f\nsigma: %.6f' % (particle_mu, particle_sigma))
    # Calculate particle distribution
    # (scipy lognorm parameterization: shape=sigma, scale=exp(mu))
    N = lognorm(particle_sigma, scale=np.exp(particle_mu))
    # Weight factors of each particle size, normalized to sum to 1
    pdf = N.pdf(p)
    pdf /= pdf.sum()
    p_weights = dict(zip(p, pdf))
    # Generate Mie scattering data weighted over the size distribution
    df = mie.generateMieDataEffective(wavelengths=w,
                                      p_normed_weights_dict=p_weights,
                                      number_of_rvs=1000,
                                      number_of_theta_angles=90,
                                      n_particle=1.83,
                                      n_silicone=1.55,
                                      p_diameters=p)
    # Save Mie data to an HDF5 file for later use by the raytracer
    mie.saveMieDataToHDF5(df_list=[df],
                          particle_diameters=[1],
                          wavelengths=w,
                          out_fname="mieData.hdf5")
| {
"content_hash": "b6b17d4241f2af2bfd0a183f8dc57872",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 70,
"avg_line_length": 34.1,
"alnum_prop": 0.5300586510263929,
"repo_name": "ollitapa/VTT-Raytracer",
"id": "e4562bef8260e70955cb00bc57a67eef920e11b6",
"size": "1973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_source/exampleGenerateMieLogNorm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "488979"
},
{
"name": "Lua",
"bytes": "8826"
},
{
"name": "Python",
"bytes": "72089"
}
],
"symlink_target": ""
} |
import logging
from xml.etree import ElementTree as etree
from xml.parsers import expat
import six
from neutronclient.common import constants
from neutronclient.common import exceptions as exception
from neutronclient.openstack.common.gettextutils import _
from neutronclient.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
class ActionDispatcher(object):
    """Maps method name to local methods through action name."""

    def dispatch(self, *args, **kwargs):
        """Invoke the method named by the 'action' keyword.

        Falls back to self.default when no matching method exists.
        """
        action_name = str(kwargs.pop('action', 'default'))
        handler = getattr(self, action_name, self.default)
        return handler(*args, **kwargs)

    def default(self, data):
        """Fallback handler; subclasses must override this."""
        raise NotImplementedError()
class DictSerializer(ActionDispatcher):
    """Default request body serialization."""

    def serialize(self, data, action='default'):
        """Serialize 'data' using the handler registered for 'action'."""
        return self.dispatch(data, action=action)

    def default(self, data):
        """Fallback: serialize anything unrecognized to an empty string."""
        return ""
class JSONDictSerializer(DictSerializer):
    """Default JSON request body serialization."""

    def default(self, data):
        # Objects the JSON encoder cannot handle natively fall back to
        # their unicode representation.
        return jsonutils.dumps(data, default=lambda obj: unicode(obj))
class XMLDictSerializer(DictSerializer):
    # Serializes dicts to Neutron-flavoured XML.  Relies on the module-level
    # 'constants', 'etree', 'six' and 'LOG' names imported at file top.
    def __init__(self, metadata=None, xmlns=None):
        """XMLDictSerializer constructor.
        :param metadata: information needed to deserialize XML into
                         a dictionary.
        :param xmlns: XML namespace to include with serialized XML
        """
        super(XMLDictSerializer, self).__init__()
        self.metadata = metadata or {}
        # Namespace resolution order: explicit argument, then metadata,
        # then the Neutron v2.0 default.
        if not xmlns:
            xmlns = self.metadata.get('xmlns')
        if not xmlns:
            xmlns = constants.XML_NS_V20
        self.xmlns = xmlns
    def default(self, data):
        """Default serializer of XMLDictSerializer.
        :param data: expect data to contain a single key as XML root, or
                     contain another '*_links' key as atom links. Other
                     case will use 'VIRTUAL_ROOT_KEY' as XML root.
        """
        try:
            links = None
            has_atom = False
            if data is None:
                root_key = constants.VIRTUAL_ROOT_KEY
                root_value = None
            else:
                # Pop atom-link entries out of the payload so they can be
                # re-attached as <atom:link> child nodes afterwards.
                link_keys = [k for k in six.iterkeys(data) or []
                             if k.endswith('_links')]
                if link_keys:
                    links = data.pop(link_keys[0], None)
                    has_atom = True
                # NOTE(review): data.keys()[0] assumes Python 2, where
                # dict.keys() returns a list; it would raise on Python 3.
                root_key = (len(data) == 1 and
                            data.keys()[0] or constants.VIRTUAL_ROOT_KEY)
                root_value = data.get(root_key, data)
            doc = etree.Element("_temp_root")
            used_prefixes = []
            self._to_xml_node(doc, self.metadata, root_key,
                              root_value, used_prefixes)
            if links:
                self._create_link_nodes(list(doc)[0], links)
            return self.to_xml_string(list(doc)[0], used_prefixes, has_atom)
        except AttributeError as e:
            # A non-dict payload lands here; serialize to an empty string.
            LOG.exception(str(e))
            return ''
    def __call__(self, data):
        # Provides a migration path to a cleaner WSGI layer, this
        # "default" stuff and extreme extensibility isn't being used
        # like originally intended
        return self.default(data)
    def to_xml_string(self, node, used_prefixes, has_atom=False):
        # Stamp namespace declarations on the root before rendering.
        self._add_xmlns(node, used_prefixes, has_atom)
        return etree.tostring(node, encoding='UTF-8')
    #NOTE (ameade): the has_atom should be removed after all of the
    # XML serializers and view builders have been updated to the current
    # spec that required all responses include the xmlns:atom, the has_atom
    # flag is to prevent current tests from breaking
    def _add_xmlns(self, node, used_prefixes, has_atom=False):
        node.set('xmlns', self.xmlns)
        node.set(constants.TYPE_XMLNS, self.xmlns)
        if has_atom:
            node.set(constants.ATOM_XMLNS, constants.ATOM_NAMESPACE)
        node.set(constants.XSI_NIL_ATTR, constants.XSI_NAMESPACE)
        # Only declare the extension namespaces that were actually used
        # while building the tree.
        ext_ns = self.metadata.get(constants.EXT_NS, {})
        for prefix in used_prefixes:
            if prefix in ext_ns:
                node.set('xmlns:' + prefix, ext_ns[prefix])
    def _to_xml_node(self, parent, metadata, nodename, data, used_prefixes):
        """Recursive method to convert data members to XML nodes."""
        result = etree.SubElement(parent, nodename)
        if ":" in nodename:
            used_prefixes.append(nodename.split(":", 1)[0])
        #TODO(bcwaldon): accomplish this without a type-check
        if isinstance(data, list):
            if not data:
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_LIST)
                return result
            # Element name for list items: explicit plural mapping from the
            # metadata, else strip a trailing 's', else the generic 'item'.
            singular = metadata.get('plurals', {}).get(nodename, None)
            if singular is None:
                if nodename.endswith('s'):
                    singular = nodename[:-1]
                else:
                    singular = 'item'
            for item in data:
                self._to_xml_node(result, metadata, singular, item,
                                  used_prefixes)
        #TODO(bcwaldon): accomplish this without a type-check
        elif isinstance(data, dict):
            if not data:
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_DICT)
                return result
            # Keys listed under metadata['attributes'][nodename] become XML
            # attributes; everything else becomes a child element.
            attrs = metadata.get('attributes', {}).get(nodename, {})
            for k, v in sorted(data.items()):
                if k in attrs:
                    result.set(k, str(v))
                else:
                    self._to_xml_node(result, metadata, k, v,
                                      used_prefixes)
        elif data is None:
            result.set(constants.XSI_ATTR, 'true')
        else:
            # Scalar leaf: tag the concrete type so the deserializer can
            # round-trip it.  bool is checked before int because bool is an
            # int subclass.  NOTE(review): 'long' and 'unicode' are
            # Python 2 builtins; this module predates Python 3 support.
            if isinstance(data, bool):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_BOOL)
            elif isinstance(data, int):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_INT)
            elif isinstance(data, long):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_LONG)
            elif isinstance(data, float):
                result.set(
                    constants.TYPE_ATTR,
                    constants.TYPE_FLOAT)
            LOG.debug("Data %(data)s type is %(type)s",
                      {'data': data,
                       'type': type(data)})
            if isinstance(data, str):
                result.text = unicode(data, 'utf-8')
            else:
                result.text = unicode(data)
        return result
    def _create_link_nodes(self, xml_doc, links):
        # Re-attach the previously popped atom links as child elements.
        for link in links:
            link_node = etree.SubElement(xml_doc, 'atom:link')
            link_node.set('rel', link['rel'])
            link_node.set('href', link['href'])
class TextDeserializer(ActionDispatcher):
    """Default request body deserialization."""

    def deserialize(self, datastring, action='default'):
        """Route 'datastring' to the handler registered for 'action'."""
        return self.dispatch(datastring, action=action)

    def default(self, datastring):
        """Fallback: an unrecognized body deserializes to an empty dict."""
        return {}
class JSONDeserializer(TextDeserializer):
    """Deserializes JSON request bodies."""

    def _from_json(self, datastring):
        # Translate a parse failure into the API-level exception so callers
        # see a uniform error for malformed bodies.
        try:
            return jsonutils.loads(datastring)
        except ValueError:
            raise exception.MalformedResponseBody(
                reason=_("Cannot understand JSON"))

    def default(self, datastring):
        return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
    # Deserializes the XML produced by XMLDictSerializer back into plain
    # Python structures, using the type hints stamped on the elements.
    def __init__(self, metadata=None):
        """XMLDeserializer constructor.
        :param metadata: information needed to deserialize XML into
                         a dictionary.
        """
        super(XMLDeserializer, self).__init__()
        self.metadata = metadata or {}
        xmlns = self.metadata.get('xmlns')
        if not xmlns:
            xmlns = constants.XML_NS_V20
        self.xmlns = xmlns
    def _get_key(self, tag):
        # Strip '{namespace}' Clark notation from a tag.  Tags in the
        # default namespace lose the prefix entirely; tags in a registered
        # extension namespace get a 'prefix:' back.
        # NOTE(review): a tag in an *unrecognized* namespace falls through
        # both branches and implicitly returns None -- confirm intended.
        tags = tag.split("}", 1)
        if len(tags) == 2:
            ns = tags[0][1:]
            bare_tag = tags[1]
            ext_ns = self.metadata.get(constants.EXT_NS, {})
            if ns == self.xmlns:
                return bare_tag
            for prefix, _ns in ext_ns.items():
                if ns == _ns:
                    return prefix + ":" + bare_tag
        else:
            return tag
    def _get_links(self, root_tag, node):
        # Collect atom link children into a '<root>_links' entry.
        # NOTE(review): the root_tag parameter is immediately recomputed
        # from node.tag, so the argument value is never used.
        link_nodes = node.findall(constants.ATOM_LINK_NOTATION)
        root_tag = self._get_key(node.tag)
        link_key = "%s_links" % root_tag
        link_list = []
        for link in link_nodes:
            link_list.append({'rel': link.get('rel'),
                              'href': link.get('href')})
            # Remove link node in order to avoid link node being
            # processed as an item in _from_xml_node
            node.remove(link)
        return link_list and {link_key: link_list} or {}
    def _from_xml(self, datastring):
        if datastring is None:
            return None
        plurals = set(self.metadata.get('plurals', {}))
        try:
            node = etree.fromstring(datastring)
            root_tag = self._get_key(node.tag)
            links = self._get_links(root_tag, node)
            result = self._from_xml_node(node, plurals)
            # There is no case where root_tag = constants.VIRTUAL_ROOT_KEY
            # and links is not None because of the way data are serialized
            if root_tag == constants.VIRTUAL_ROOT_KEY:
                return result
            return dict({root_tag: result}, **links)
        except Exception as e:
            # Distinguish XML parse errors (reported as a malformed body)
            # from everything else (re-raised untouched).
            parseError = False
            # Python2.7
            if (hasattr(etree, 'ParseError') and
                    isinstance(e, getattr(etree, 'ParseError'))):
                parseError = True
            # Python2.6
            elif isinstance(e, expat.ExpatError):
                parseError = True
            if parseError:
                msg = _("Cannot understand XML")
                raise exception.MalformedResponseBody(reason=msg)
            else:
                raise
    def _from_xml_node(self, node, listnames):
        """Convert a minidom node to a simple Python type.
        :param node: minidom node name
        :param listnames: list of XML node names whose subnodes should
                          be considered list items.
        """
        # xsi:nil and the serializer's type attribute drive the conversion.
        attrNil = node.get(str(etree.QName(constants.XSI_NAMESPACE, "nil")))
        attrType = node.get(str(etree.QName(
            self.metadata.get('xmlns'), "type")))
        if (attrNil and attrNil.lower() == 'true'):
            return None
        elif not len(node) and not node.text:
            # Empty element: the type attribute disambiguates {} vs [] vs ''
            if (attrType and attrType == constants.TYPE_DICT):
                return {}
            elif (attrType and attrType == constants.TYPE_LIST):
                return []
            else:
                return ''
        elif (len(node) == 0 and node.text):
            # Scalar leaf: convert the text back to the tagged type.
            # NOTE(review): 'long' is a Python 2 builtin.
            converters = {constants.TYPE_BOOL:
                          lambda x: x.lower() == 'true',
                          constants.TYPE_INT:
                          lambda x: int(x),
                          constants.TYPE_LONG:
                          lambda x: long(x),
                          constants.TYPE_FLOAT:
                          lambda x: float(x)}
            if attrType and attrType in converters:
                return converters[attrType](node.text)
            else:
                return node.text
        elif self._get_key(node.tag) in listnames:
            # Element registered as a plural: children become list items.
            return [self._from_xml_node(n, listnames) for n in node]
        else:
            # Generic element: XML attributes (minus namespace/type
            # bookkeeping) and child elements merge into one dict.
            result = dict()
            for attr in node.keys():
                if (attr == 'xmlns' or
                        attr.startswith('xmlns:') or
                        attr == constants.XSI_ATTR or
                        attr == constants.TYPE_ATTR):
                    continue
                result[self._get_key(attr)] = node.get(attr)
            children = list(node)
            for child in children:
                result[self._get_key(child.tag)] = self._from_xml_node(
                    child, listnames)
            return result
    def default(self, datastring):
        return {'body': self._from_xml(datastring)}
    def __call__(self, datastring):
        # Adding a migration path to allow us to remove unnecessary classes
        return self.default(datastring)
# NOTE(maru): this class is duplicated from neutron.wsgi
class Serializer(object):
    """Serializes and deserializes dictionaries to certain MIME types."""

    def __init__(self, metadata=None, default_xmlns=None):
        """Create a serializer based on the given WSGI environment.

        'metadata' is an optional dict mapping MIME types to information
        needed to serialize a dictionary to that type.
        """
        self.metadata = metadata or {}
        self.default_xmlns = default_xmlns

    def _get_serialize_handler(self, content_type):
        """Return the serializer for 'content_type'.

        Raises InvalidContentType for unsupported MIME types.
        """
        handlers = {
            'application/json': JSONDictSerializer(),
            'application/xml': XMLDictSerializer(self.metadata),
        }
        try:
            return handlers[content_type]
        except KeyError:
            # Bug fix: the original caught bare Exception here, which could
            # also mask genuine failures as InvalidContentType.  Only a
            # missing key means the MIME type is unsupported.
            raise exception.InvalidContentType(content_type=content_type)

    def serialize(self, data, content_type):
        """Serialize a dictionary into the specified content type."""
        return self._get_serialize_handler(content_type).serialize(data)

    def deserialize(self, datastring, content_type):
        """Deserialize a string to a dictionary.

        The string must be in the format of a supported MIME type.
        """
        return self.get_deserialize_handler(content_type).deserialize(
            datastring)

    def get_deserialize_handler(self, content_type):
        """Return the deserializer for 'content_type'.

        Raises InvalidContentType for unsupported MIME types.
        """
        handlers = {
            'application/json': JSONDeserializer(),
            'application/xml': XMLDeserializer(self.metadata),
        }
        try:
            return handlers[content_type]
        except KeyError:
            raise exception.InvalidContentType(content_type=content_type)
| {
"content_hash": "8823ac3db64d61a6d93c43f6b78b4f9b",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 76,
"avg_line_length": 36.82608695652174,
"alnum_prop": 0.5523300229182582,
"repo_name": "rackerlabs/python-neutronclient",
"id": "e2cd7641b75ae7daa9fae9803d16331f4e80d1dd",
"size": "15073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutronclient/common/serializer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from f5.multi_device.cluster import TrustDomain
from f5.multi_device.exceptions import DeviceAlreadyInTrustDomain
from f5.multi_device.exceptions import DeviceNotTrusted
import mock
import pytest
class MockDeviceInfo(object):
    """Stand-in for a device-info record as read from tm.cm.devices.

    Carries only the attributes the trust-domain code inspects: the device
    name, the self-device flag, and the management address.
    """

    def __init__(self, name):
        self.name = name
        self.selfDevice = 'true'
        self.managementIp = '1.1.1.1'
@pytest.fixture
def BigIPs():
    """Provide four mock BIG-IP connections whose device collections each
    report a single device named 'test'."""
    def _mock_bigip():
        device = mock.MagicMock()
        device.__name = 'me'
        get_collection = device.tm.cm.devices.get_collection
        get_collection.return_value = [MockDeviceInfo('test')]
        get_collection.__name__ = 'test'
        return device
    return [_mock_bigip() for _ in range(4)]
@pytest.fixture
def TrustDomainCreateNew(BigIPs):
    """Provide a fresh, unconfigured TrustDomain together with the mock
    devices used to drive it."""
    return TrustDomain(), BigIPs
def test_validate_device_not_trusted(TrustDomainCreateNew):
    """validate() must raise DeviceNotTrusted when the devices do not list
    each other in their trusted-device collections."""
    trust_domain, bigips = TrustDomainCreateNew
    with pytest.raises(DeviceNotTrusted) as ex:
        trust_domain.devices = bigips
        trust_domain.validate()
    assert "'test' is not trusted by 'test', which trusts: []" in str(ex.value)
@mock.patch('f5.multi_device.trust_domain.TrustDomain._set_attributes')
@mock.patch('f5.multi_device.trust_domain.TrustDomain.validate')
def test___init__(mock_set_attr, mock_validate, BigIPs):
    # Constructing with devices should forward them to _set_attributes.
    # NOTE(review): stacked mock.patch decorators apply bottom-up, so the
    # first parameter actually receives the 'validate' mock and the second
    # the '_set_attributes' mock -- the names appear swapped.  The assert
    # still works because it reads the patched attribute off the instance.
    mock_bigips = BigIPs
    td = TrustDomain(devices=mock_bigips)
    assert td._set_attributes.call_args == mock.call(devices=mock_bigips)
def test__set_attributes(BigIPs):
    """_set_attributes should store the devices/partition and initialize the
    default device-group settings."""
    trust_domain = TrustDomain()
    trust_domain._set_attributes(devices=BigIPs, partition='test')
    assert trust_domain.devices == BigIPs
    assert trust_domain.partition == 'test'
    assert trust_domain.device_group_name == 'device_trust_group'
    assert trust_domain.device_group_type == 'sync-only'
@mock.patch('f5.multi_device.trust_domain.TrustDomain._add_trustee')
@mock.patch('f5.multi_device.trust_domain.pollster')
def test_create(mock_add_trustee, mock_pollster, TrustDomainCreateNew):
    # create() should anchor on the first device and add each remaining
    # device as a trustee, in order.
    # NOTE(review): patch decorators apply bottom-up, so 'mock_add_trustee'
    # actually receives the pollster mock and vice versa; the assertions
    # bypass the naming by inspecting td._add_trustee directly.
    td, mock_bigips = TrustDomainCreateNew
    td.create(devices=mock_bigips, partition='test')
    assert td.devices == mock_bigips
    assert td.partition == 'test'
    assert td._add_trustee.call_args_list == \
        [
            mock.call(mock_bigips[1]),
            mock.call(mock_bigips[2]),
            mock.call(mock_bigips[3])
        ]
@mock.patch('f5.multi_device.trust_domain.TrustDomain._add_trustee')
@mock.patch('f5.multi_device.trust_domain.pollster')
@mock.patch('f5.multi_device.trust_domain.TrustDomain._remove_trustee')
def test_teardown(
        mock_add_trustee, mock_pollster, mock_rem_trustee, TrustDomainCreateNew
):
    # teardown() should remove every device (including the anchor) and
    # leave the domain mapping empty.
    # NOTE(review): patch decorators apply bottom-up, so the parameter
    # names do not line up with the decorators; the assertions read the
    # patched attribute off the instance instead.
    td, mock_bigips = TrustDomainCreateNew
    td.create(devices=mock_bigips, partition='test')
    td.teardown()
    assert td.domain == {}
    assert td._remove_trustee.call_args_list == \
        [
            mock.call(mock_bigips[0]),
            mock.call(mock_bigips[1]),
            mock.call(mock_bigips[2]),
            mock.call(mock_bigips[3])
        ]
@mock.patch('f5.multi_device.trust_domain.get_device_info')
@mock.patch('f5.multi_device.trust_domain.TrustDomain._modify_trust')
def test__add_trustee(mock_dev_info, mock_mod_trust, TrustDomainCreateNew):
    # _add_trustee should delegate to _modify_trust with the anchor device,
    # the add-trustee command builder, and the new trustee.
    # NOTE(review): decorator order is bottom-up, so 'mock_dev_info' is in
    # fact the _modify_trust mock and 'mock_mod_trust' the get_device_info
    # mock; the assertion reads td._modify_trust directly.
    td, mock_bigips = TrustDomainCreateNew
    td._set_attributes(devices=mock_bigips, partition='test')
    td._add_trustee(mock_bigips[1])
    assert td._modify_trust.call_args == \
        mock.call(mock_bigips[0], td._get_add_trustee_cmd, mock_bigips[1])
@mock.patch('f5.multi_device.trust_domain.TrustDomain._modify_trust')
def test__add_trustee_already_in_domain(
        mock_mod_trust, TrustDomainCreateNew
):
    # Adding a device whose name is already a key in td.domain must raise
    # DeviceAlreadyInTrustDomain instead of modifying the trust again.
    td, mock_bigips = TrustDomainCreateNew
    td._set_attributes(devices=mock_bigips, partition='test')
    td.domain = {'test': 'device'}
    with pytest.raises(DeviceAlreadyInTrustDomain) as ex:
        td._add_trustee(mock_bigips[1])
    assert "Device: 'test' is already in this trust domain" in str(ex.value)
| {
"content_hash": "caf53885e1e43f358f6376345040b49c",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 34.44347826086957,
"alnum_prop": 0.6824034334763949,
"repo_name": "F5Networks/f5-common-python",
"id": "1698baefafafb3e460b65497d5fb4ac7b583d26b",
"size": "4548",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "f5/multi_device/test/unit/test_trust_domain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "713"
},
{
"name": "Groovy",
"bytes": "4321"
},
{
"name": "Python",
"bytes": "2705690"
},
{
"name": "Shell",
"bytes": "6398"
}
],
"symlink_target": ""
} |
from norc.core.models.task import *
from norc.core.models.job import *
from norc.core.models.schedules import *
from norc.core.models.scheduler import *
from norc.core.models.queue import *
from norc.core.models.queuegroup import *
from norc.core.models.executor import *
from norc.core.models.extras import *
from norc import settings
| {
"content_hash": "5360a39770baf1b427cdc1625add9d1a",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 41,
"avg_line_length": 33.7,
"alnum_prop": 0.7952522255192879,
"repo_name": "darrellsilver/norc",
"id": "32ca2b09deb8080ac081059013343a31386185d6",
"size": "338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/models/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "20532"
},
{
"name": "Python",
"bytes": "165903"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import os
# provide correct path for version
__version__ = None
here = os.path.dirname(os.path.dirname(__file__))
exec(open(os.path.join(here, 'weibull/version.py')).read())
with open('readme.md', 'r') as f:
readme = f.read()
requirements = [
'pandas >= 0.20.0',
'matplotlib >= 2.0',
'numpy >= 1.0',
'scipy >= 1.0.0',
]
setup_requirements = [
'flake8 >= 3.5.0',
'pytest >= 1.4.0',
'sphinx >= 1.6.5'
]
setup(
name='weibull',
version=__version__,
description='Weibull analysis and test design for reliability and life applications',
long_description=readme,
long_description_content_type='text/markdown',
author='Jason R. Jones',
author_email='slightlynybbled@gmail.com',
url='https://github.com/slightlynybbled/weibull',
packages=find_packages(),
include_package_data=True,
install_requires=requirements,
setup_requires=setup_requirements,
zip_safe=True,
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Natural Language :: English'
],
keywords='weibull reliability'
) | {
"content_hash": "6fc6564f471a138653eb1d30ffac8025",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 89,
"avg_line_length": 26.9375,
"alnum_prop": 0.6318638824439289,
"repo_name": "slightlynybbled/weibull",
"id": "6df7f798b49e3132cfe73b860fc255648e6f27b7",
"size": "1293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45070"
}
],
"symlink_target": ""
} |
from typing import Dict
import json
import os
from kfp.v2 import dsl
def update_output_artifact(executor_input: str,
                           target_artifact_name: str,
                           uri: str,
                           metadata: Optional[dict] = None):
    """Rewrite one named output artifact and persist the executor output file.

    Parses the JSON-serialized ``executor_input``, finds the declared output
    artifact called ``target_artifact_name``, replaces its ``uri`` and
    ``metadata``, and writes the resulting executor output JSON to the path
    given by ``outputs.outputFile`` (creating parent directories as needed).

    Args:
        executor_input: JSON string of the executor input message.
        target_artifact_name: Name of the output artifact to update.
        uri: New artifact uri.
        metadata: New artifact metadata; defaults to an empty dict.
    """
    # ``None`` sentinel replaces the original mutable default argument
    # (``metadata: dict = {}``), which is a classic Python pitfall.
    if metadata is None:
        metadata = {}
    executor_input_json = json.loads(executor_input)
    executor_output = {'artifacts': {}}
    for name, artifacts in executor_input_json.get('outputs',
                                                   {}).get('artifacts',
                                                           {}).items():
        artifacts_list = artifacts.get('artifacts')
        # Only the targeted artifact is rewritten; artifacts with other
        # names are omitted from the output (same as the original behavior).
        if name == target_artifact_name and artifacts_list:
            updated_runtime_artifact = artifacts_list[0]
            updated_runtime_artifact['uri'] = uri
            updated_runtime_artifact['metadata'] = metadata
            artifacts_list = {'artifacts': [updated_runtime_artifact]}
            executor_output['artifacts'][name] = artifacts_list
    # Write the executor output file where the launcher expects it.
    os.makedirs(
        os.path.dirname(executor_input_json['outputs']['outputFile']),
        exist_ok=True)
    with open(executor_input_json['outputs']['outputFile'], 'w') as f:
        f.write(json.dumps(executor_output))
def update_output_artifacts(executor_input: str, artifacts: list):
    """Write a list of Artifacts to the executor output file.

    Each element of ``artifacts`` (objects exposing ``name``, ``uri`` and
    ``metadata``) that matches a declared output artifact in
    ``executor_input`` is converted to an executor output artifact; the
    combined result is written to ``outputs.outputFile``.
    """
    executor_input_json = json.loads(executor_input)
    executor_output = {'artifacts': {}}
    output_artifacts = executor_input_json.get('outputs', {}).get('artifacts', {})
    # This assumes that no other output artifact exists.
    for artifact in artifacts:
        # Membership test directly on the dict (redundant ``.keys()`` removed).
        if artifact.name in output_artifacts:
            # Converts the artifact into an executor output artifact, see
            # https://github.com/kubeflow/pipelines/blob/master/api/v2alpha1/pipeline_spec.proto#L878
            artifacts_list = output_artifacts[artifact.name].get('artifacts')
            if artifacts_list:
                updated_runtime_artifact = artifacts_list[0]
                updated_runtime_artifact['uri'] = artifact.uri
                updated_runtime_artifact['metadata'] = artifact.metadata
                artifacts_list = {'artifacts': [updated_runtime_artifact]}
                executor_output['artifacts'][artifact.name] = artifacts_list
    # Write the executor output file, creating its directory when missing.
    os.makedirs(
        os.path.dirname(executor_input_json['outputs']['outputFile']),
        exist_ok=True)
    with open(executor_input_json['outputs']['outputFile'], 'w') as f:
        f.write(json.dumps(executor_output))
| {
"content_hash": "757d199e3ede2dad5daf26928bcef3ab",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 97,
"avg_line_length": 44.1551724137931,
"alnum_prop": 0.6458414681764936,
"repo_name": "kubeflow/pipelines",
"id": "bffc15a7fd8032dd75286b0ba750726ff92a8134",
"size": "3168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/google-cloud/google_cloud_pipeline_components/container/v1/gcp_launcher/utils/artifact_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "799"
},
{
"name": "CSS",
"bytes": "2171"
},
{
"name": "Dockerfile",
"bytes": "49331"
},
{
"name": "Go",
"bytes": "1903937"
},
{
"name": "HTML",
"bytes": "3656"
},
{
"name": "JavaScript",
"bytes": "544297"
},
{
"name": "Jinja",
"bytes": "938"
},
{
"name": "Jupyter Notebook",
"bytes": "359548"
},
{
"name": "Makefile",
"bytes": "22164"
},
{
"name": "Mustache",
"bytes": "23652"
},
{
"name": "PowerShell",
"bytes": "3194"
},
{
"name": "Python",
"bytes": "5684887"
},
{
"name": "Shell",
"bytes": "264595"
},
{
"name": "Smarty",
"bytes": "8295"
},
{
"name": "Starlark",
"bytes": "553"
},
{
"name": "TypeScript",
"bytes": "4294958"
}
],
"symlink_target": ""
} |
import sys, os, argparse
from paraview.simple import *
from paraview import data_exploration as wx
#import matplotlib.pyplot as plt
###############################################################################
# Helper function to generate the tent functions needed for scalar opacity
# function
###############################################################################
def createHatFunctions():
    """Build the piecewise-linear "tent" opacity transfer functions.

    Returns a list of ``[xPoints, yPoints]`` pairs, one tent per slot.  Each
    tent spans a base of width 0.2 inside [0, 1], rising linearly from 0 to 1
    at its center and back down, with flat zero segments on either side.
    """
    baseWidth = 0.20
    spacing = baseWidth / 2.0
    halfWidth = baseWidth / 2.0
    hatFunctions = []
    for idx in range(int(1.0 / baseWidth)):
        center = (baseWidth / 2.0) + (idx * baseWidth)
        start = center - halfWidth
        xs = [0.0, start, start + spacing, start + (2 * spacing), 1.0]
        ys = [0.0, 0.0, 1.0, 0.0, 0.0]
        hatFunctions.append([xs, ys])
    return hatFunctions
###############################################################################
# This method does all the processing
###############################################################################
def doProcessing(inputDir, inputPattern, outputDir, opacityFnType):
    """Volume-render the vorticity field of a VTK time series into a cinema
    image stack.

    For each of 101 timesteps and each of 5 opacity "volumes", a 360-degree
    set of JPEG images is exported under ``outputDir`` using the
    ``{time}/{volumeIdx}/{theta}_{phi}.jpg`` naming scheme.

    NOTE: this file is Python 2 (see the ``print`` statement below).

    :param inputDir: directory containing the input VTK files.
    :param inputPattern: filename pattern containing ``%d`` for the timestep.
    :param outputDir: directory where the cinema dataset is written.
    :param opacityFnType: ``'tent'`` for hat-shaped opacity functions,
        anything else selects the linear ramp variant.
    """
    # -----------------------------------------------------------------------------
    # Path to input/output data/directories
    # -----------------------------------------------------------------------------
    files_pattern = os.path.join(inputDir, inputPattern)
    file_times = range(0, 101)
    #file_times = [ 80 ]
    filenames = [ (files_pattern % time) for time in file_times]
    # -----------------------------------------------------------------------------
    # Rendering configuration
    # -----------------------------------------------------------------------------
    resolution = 500
    view_size = [resolution, resolution]
    angle_steps = [15, 15]
    #angle_steps = [90, 90]
    # Camera orbit parameters for the 360-degree exporter.
    distance = 24632.991324377483
    rotation_axis = [0.0, 1.0, 0.0]
    #center_of_rotation = [-1649.1046142578125, -752.328125, 1374.1217346191406]
    center_of_rotation = [0.0, 0.0, 0.0]
    view = GetRenderView()
    view.ViewSize = view_size
    view.Background = [0.0, 0.0, 0.0]
    view.OrientationAxesVisibility = 0
    view.CenterAxesVisibility = 0
    # -----------------------------------------------------------------------------
    # Output configuration
    # -----------------------------------------------------------------------------
    fng = wx.FileNameGenerator(outputDir, '{time}/{volumeIdx}/{theta}_{phi}.jpg')
    exporter = wx.ThreeSixtyImageStackExporter(fng,
                                               view,
                                               center_of_rotation,
                                               distance,
                                               rotation_axis,
                                               angle_steps)
    # -----------------------------------------------------------------------------
    # Pipeline configuration
    # -----------------------------------------------------------------------------
    # create a new 'Legacy VTK Reader'
    readerProxy = LegacyVTKReader(FileNames=filenames)
    # This translation transform is a workaround for a bug in the camera orbiting
    # calculations made in ThreeSixtyImageStackExporter
    transform1 = Transform(Input=readerProxy)
    transform1.Transform = 'Transform'
    transform1.Transform.Translate = [1649.1046142578125, 752.328125, -1374.1217346191406]
    # create a new 'Cell Data to Point Data'
    cellDatatoPointData1 = CellDatatoPointData(Input=transform1)
    # get color transfer function/color map for 'vorticity'
    # (blue at 0.0 through HSV space to red at 200.0, locked range)
    vorticityLUT = GetColorTransferFunction('vorticity')
    vorticityLUT.RGBPoints = [0.0, 0.0, 0.0, 1.0, 200.0, 1.0, 0.0, 0.0]
    vorticityLUT.LockScalarRange = 1
    vorticityLUT.ColorSpace = 'HSV'
    vorticityLUT.NanColor = [0.498039, 0.498039, 0.498039]
    vorticityLUT.ScalarRangeInitialized = 1.0
    # get opacity transfer function/opacity map for 'vorticity'
    vorticityPWF = GetOpacityTransferFunction('vorticity')
    vorticityPWF.Points = [0.0, 0.0, 0.5, 0.0, 200.0, 1.0, 0.5, 0.0]
    vorticityPWF.ScalarRangeInitialized = 1
    # show data from fine_results_
    readerDisplay = Show(transform1)
    readerDisplay.ColorArrayName = [None, '']
    readerDisplay.Opacity = 0.15
    readerDisplay.ScalarOpacityUnitDistance = 158.07645437184576
    # show data from cellDatatoPointData1
    cellDatatoPointData1Display = Show(cellDatatoPointData1)
    cellDatatoPointData1Display.Representation = 'Volume'
    cellDatatoPointData1Display.ColorArrayName = ['POINTS', 'vorticity']
    cellDatatoPointData1Display.LookupTable = vorticityLUT
    cellDatatoPointData1Display.ScalarOpacityFunction = vorticityPWF
    cellDatatoPointData1Display.ScalarOpacityUnitDistance = 158.07645437184576
    # -----------------------------------------------------------------------------
    # Batch processing
    # -----------------------------------------------------------------------------
    if opacityFnType == 'tent':
        hatFunctions = createHatFunctions()
    Render()
    for t in range(0, len(file_times), 1):
        time = file_times[t]
        GetAnimationScene().TimeKeeper.Time = float(time)
        UpdatePipeline(time)
        # Fixed data range matching the locked LUT range above.
        dataRange = [0.0, 200.0]
        print "Moving to timestep ",time,", new data range: ",dataRange
        for volumeIdx in range(5):
            curRange = dataRange[1] - dataRange[0]
            pwfPoints = []
            if opacityFnType == 'tent':
                # Scale the unit-interval hat function onto the data range;
                # points are flat (x, y, midpoint, sharpness) quadruples.
                xPoints = hatFunctions[volumeIdx][0]
                yPoints = hatFunctions[volumeIdx][1]
                for i in range(len(xPoints)):
                    pwfPoints.append(dataRange[0] + (xPoints[i] * curRange))
                    pwfPoints.append(yPoints[i])
                    pwfPoints.append(0.5)
                    pwfPoints.append(0.0)
            else:
                # Linear ramp starting at a per-volume threshold.
                curStep = dataRange[0] + (float(volumeIdx) * (curRange / 5.0))
                pwfPoints = [ dataRange[0], 0.0, 0.5, 0.0,
                              curStep, 0.0, 0.5, 0.0,
                              dataRange[1], 1.0, 0.5, 0.0 ]
            newPwf = CreatePiecewiseFunction( Points=pwfPoints )
            cellDatatoPointData1Display.ScalarOpacityFunction = newPwf
            fng.update_active_arguments(volumeIdx=volumeIdx)
            fng.update_label_arguments(volumeIdx="Idx")
            exporter.UpdatePipeline(time)
###############################################################################
# Main script entry point
###############################################################################
if __name__ == "__main__":
    # Command-line front end: every option is a plain string with an empty
    # default, so declare them table-driven instead of one call per flag.
    description = "Python script to generate volume rendered NE cooling data"
    parser = argparse.ArgumentParser(description=description)
    for flag, help_text in (
            ("--inputdir", "Path to directory where input data files exist"),
            ("--inputpattern", "String pattern containing %d where pattern should be replaced with numbers"),
            ("--outputdir", "Path to directory where cinema dataset should be written"),
            ("--optype", "Opacity function type, should be either 'tent' or 'linear'")):
        parser.add_argument(flag, type=str, default="", help=help_text)
    args = parser.parse_args()
    doProcessing(args.inputdir, args.inputpattern, args.outputdir, args.optype)
| {
"content_hash": "864d30d9f23c15300a12db7f9633f3e7",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 146,
"avg_line_length": 42.88135593220339,
"alnum_prop": 0.5259552042160738,
"repo_name": "Kitware/cinema",
"id": "f2a6c3421140296dddde8ada9c9ac8b33e00dae8",
"size": "9252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/data_generation/ne-cooling/volume-vorticity.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6645"
},
{
"name": "CMake",
"bytes": "1046"
},
{
"name": "CSS",
"bytes": "160244"
},
{
"name": "HTML",
"bytes": "108738"
},
{
"name": "JavaScript",
"bytes": "628674"
},
{
"name": "Python",
"bytes": "244273"
}
],
"symlink_target": ""
} |
from setuptools import setup
# Packaging metadata for jenkins_tools.  The entries under ``scripts`` are
# installed onto the user's PATH; ``install_requires`` lists the runtime
# dependencies pip resolves at install time.
setup(
    name='jenkins_tools',
    version='0.0.69',
    packages=['jenkins_tools'],
    # Package sources live under src/, not at the repo root.
    package_dir={'jenkins_tools': 'src/jenkins_tools'},
    scripts=['scripts/run_chroot_jenkins_now',
             'scripts/run_chroot_jenkins_periodic',
             'scripts/run_chroot_jenkins_vcs',
             'scripts/run_chroot_local',
             'scripts/delete_jenkins',
             'scripts/generate_jenkins_devel',
             'scripts/generate_jenkins_doc',
             'scripts/generate_jenkins_prerelease',
             'scripts/chroot_create.sh',
             'scripts/chroot_update.sh',
             'scripts/chroot_dispatch.sh'],
    # NOTE(review): 'distribute' is the long-obsolete setuptools fork and
    # 'jenkins' is not the python-jenkins client — confirm these pins.
    install_requires=['empy', 'PyYAML', 'jenkins', 'argparse', 'rosdep', 'rosdistro >= 0.2.7', 'rospkg', 'catkin-pkg', 'distribute'],
    # Ship the empy templates alongside the package code.
    package_data={'jenkins_tools': ['resources/templates/*']},
    author='Wim Meeussen',
    author_email='wim@hidof.com',
    maintainer='Dirk Thomas',
    maintainer_email='dthomas@osrfoundation.org',
    url='http://wiki.ros.org/jenkins_tools',
    download_url='http://download.ros.org/downloads/jenkins_tools/',
    keywords=['ROS'],
    classifiers=['Programming Language :: Python',
                 'License :: OSI Approved :: BSD License'],
    description="A tool for running scripts in a chroot environment on Jenkins or locally",
    long_description="""
A tool for running scripts in a chroot environment on Jenkins or locally""",
    license='BSD'
)
| {
"content_hash": "c5aa3b8a0eaafc82bbe064dc66268743",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 133,
"avg_line_length": 43.1764705882353,
"alnum_prop": 0.6301089918256131,
"repo_name": "ros-infrastructure/jenkins_tools",
"id": "5217efde7f35a57b26a4a452ee5ef21a3b98c03a",
"size": "1491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "4850"
},
{
"name": "Python",
"bytes": "32117"
},
{
"name": "Shell",
"bytes": "6756"
}
],
"symlink_target": ""
} |
import sys
import os
import math
import shutil
from random import randint
# Results are written under $RESULTS/<fileInfo>/ — warn early if unset.
resultDir = os.environ.get('RESULTS')
if resultDir == None :
    print ("WARNING! $RESULTS not set! Attempt to write results will fail!\n")
# Expecting input
from KMCLib import *
from KMCLib.Backend import Backend
import numpy
#from RateCalc import *
from DensHist import *
from energyStats3d import *
# Positional command-line arguments (see the usage block written to
# 'settings' below for their meaning).
ovConc = float(sys.argv[1])          # overall O concentration, fraction of sites
rateConstFull = float(sys.argv[2])   # base of the neighbour-dependent hop rate
sysWidth = int(sys.argv[3])
sysLength = int(sys.argv[4])
sysDepth = int(sys.argv[5])
analInterval = int(sys.argv[6])
numStepsEquilib = int(sys.argv[7])
numStepsAnal = int(sys.argv[8])
numStepsReq = int(sys.argv[9])
numPasses = int(sys.argv[10])
fileInfo = sys.argv[11]              # subdirectory name for this run's output
resultsPlace = resultDir+"/"+fileInfo+"/"
if not os.path.exists(resultsPlace):
    os.makedirs(resultsPlace)
# Record the run parameters next to the results for reproducibility.
with open(resultsPlace+'settings', 'w') as f:
    f.write('OverallConcentration = ' + str(ovConc) +'\n')
    f.write('FullRate = ' + str(rateConstFull) +'\n')
    f.write('SysWidth = ' + str(sysWidth) +'\n')
    f.write('SysLength = ' + str(sysLength) +'\n')
    f.write('SysDepth = ' + str(sysDepth) +'\n')
    f.write('AnalInterval = ' +str(analInterval) + '\n')
    f.write('NumStepsEquilib = '+str(numStepsEquilib) +'\n')
    f.write('NumStepsAnal = '+str(numStepsAnal) +'\n')
"""I've put this in the file to make command line input easier"""
# Load the configuration and interactions.
# We're in 3d
# Simple cubic unit cell with unit lattice vectors.
cell_vectors = [[1.0,0.0,0.0],
                [0.0,1.0,0.0],
                [0.0,0.0,1.0]]
# Only bothering with one set
basis_points = [[0.0, 0.0, 0.0]]
unit_cell = KMCUnitCell(cell_vectors=cell_vectors,
                        basis_points=basis_points)
# Define the lattice.
xRep = sysWidth
yRep = sysLength
zRep = sysDepth
numPoints = xRep*zRep*yRep
lattice = KMCLattice(unit_cell=unit_cell,
                     repetitions=(xRep,yRep,zRep),
                     periodic=(True, True, True))
# Generate the initial types
types = []
types = ["V"]*numPoints
numParticles = int(numPoints*ovConc)
# Scatter "O" particles onto random vacant sites.  NOTE(review): the loop
# condition ``i <= numParticles`` places numParticles + 1 particles (and the
# do-while via ``firstPass`` guarantees at least one attempt) — confirm the
# off-by-one is intentional.
i=0
firstPass = True
while firstPass or ( i <= numParticles and i < numPoints-1 ):
    firstPass = False
    typePos = randint(0, numPoints-1)
    if types[typePos] == "V":
        types[typePos] = "O"
        i += 1
# Setup the configuration.
configuration = KMCConfiguration(lattice=lattice,
                                 types=types,
                                 possible_types=["O","V"])
# Rates.
rateConstEmpty = 1.0
#
##
###
"""I've put the processes in here to make it easier to adjust them via command line arguments."""
# Fill the list of processes: one oxygen/vacancy exchange ("O" hop from the
# origin site to a neighbour) per nearest-neighbour direction on the cubic
# lattice.  The original file spelled out six near-identical KMCProcess
# definitions that differed only in the target coordinate; they are
# generated in a loop here (same process order: up, down, left, right,
# back, forward — process numbers 0-5).
processes = []
# Only on the first set of basis_points for O/V
basis_sites = [0]
# Unit-cell offsets of the six nearest neighbours.
neighbour_offsets = [
    [0.0, 1.0, 0.0],    # up      (0)
    [0.0, -1.0, 0.0],   # down    (1)
    [-1.0, 0.0, 0.0],   # left    (2)
    [1.0, 0.0, 0.0],    # right   (3)
    [0.0, 0.0, -1.0],   # back    (4)
    [0.0, 0.0, 1.0],    # forward (5)
]
for offset in neighbour_offsets:
    # rate_constant=1.0 is a placeholder ("Will customise" in the original);
    # the custom rate calculator attached below supplies the actual rates.
    processes.append( KMCProcess(coordinates=[[0.0, 0.0, 0.0], list(offset)],
                                 elements_before=["O", "V"],
                                 elements_after=["V", "O"],
                                 basis_sites=basis_sites,
                                 rate_constant=1.0))
# Create the interactions object.
interactions = KMCInteractions(processes, implicit_wildcards=True)
# Define the custom rates calculator, using the lol model as a template
class modelRates3d(KMCRateCalculatorPlugin):
    """Custom rate calculator for the 3-d lattice model.

    The hop rate is ``rateConstFull ** (n_occupied - 1)``, where
    ``n_occupied`` is the number of "O" entries among the elements matched
    by the process geometry.
    """
    def rate(self, geometry, elements_before, elements_after, rate_constant, process_number, global_coordinate):
        # Count occupied ("O") sites in the matched neighbourhood.
        occupied = sum(1 for element in elements_before if element == "O")
        return rateConstFull ** (occupied - 1)
    def cutoff(self):
        # Overloaded base class API function.
        return 1.0
# Attach the neighbour-dependent rate model defined above.
interactions.setRateCalculator(rate_calculator=modelRates3d)
"""End of processes"""
###
##
#
# Create the model.
model = KMCLatticeModel(configuration, interactions)
# Define the parameters; not entirely sure if these are sensible or not...
# NOTE(review): numSteps*/100 and /10 assume Python 2 integer division; under
# Python 3 these become floats — confirm KMCControlParameters accepts that.
control_parameters_equilib = KMCControlParameters(number_of_steps=numStepsEquilib, analysis_interval=numStepsEquilib/100,
                                                  dump_interval=numStepsEquilib/10)
control_parameters_req = KMCControlParameters(number_of_steps=numStepsReq, analysis_interval=numStepsReq/100,
                                              dump_interval=numStepsReq/10)
control_parameters_anal = KMCControlParameters(number_of_steps=numStepsAnal, analysis_interval=1,
                                               dump_interval=numStepsAnal/10)
# Run the simulation - save trajectory to resultsPlace, which should by now exist
model.run(control_parameters_equilib, trajectory_filename=(resultsPlace+"equilib.traj"))
if not os.path.exists(resultsPlace+"enHists"):
    os.makedirs(resultsPlace+"enHists")
# Accumulate the per-bin energy histogram over all passes.
ovEnHist = []
for index in range(0, 3*numPoints):
    ovEnHist.append(0.0)
for passNum in range(0, numPasses):
    enHist = energyStats3d(spec=["O"])
    # Re-equilibrate between passes, then run the analysed segment.
    model.run(control_parameters_req, trajectory_filename=("/dev/null"))
    model.run(control_parameters_anal, trajectory_filename=("/dev/null"), analysis=[enHist])
    # Truncate (or create) this pass's histogram file before appending.
    with open(resultsPlace+"enHists/enHist"+str(passNum)+".dat", 'w') as f:
        pass
    with open(resultsPlace+"enHists/enHist"+str(passNum)+".dat", 'a') as f:
        enHist.printResults(f)
    # Read the per-pass histogram back and fold it into the running total.
    with open(resultsPlace+"enHists/enHist"+str(passNum)+".dat", 'r') as f:
        lines = f.readlines()
    for index in range(0, 3*numPoints):
        words = lines[index].split()
        ovEnHist[index] += float(words[1])
    os.remove(resultsPlace+"enHists/enHist"+str(passNum)+".dat")
# Write the pass-averaged histogram and clean up the scratch directory.
with open(resultsPlace+"ovEnHist.dat", 'w') as f:
    for index in range(0, 3*numPoints):
        f.write(str(index)+" "+str(ovEnHist[index]/float(numPasses))+"\n")
shutil.rmtree(resultsPlace+"enHists", ignore_errors=True)
print("Process would appear to have succesfully terminated! How very suspicious...")
| {
"content_hash": "a27dcab69a562337a6ebee0560461261",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 121,
"avg_line_length": 31.41015625,
"alnum_prop": 0.6134809103345356,
"repo_name": "joshuahellier/PhDStuff",
"id": "1b3f2b56a16a4735173d301d521977b1b0563ae3",
"size": "8041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codes/kmc/3Dim/periodicEnergyCalc/3dPeriodic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4177"
},
{
"name": "Fortran",
"bytes": "10357"
},
{
"name": "Mathematica",
"bytes": "729947"
},
{
"name": "Python",
"bytes": "786744"
},
{
"name": "Roff",
"bytes": "9"
},
{
"name": "Shell",
"bytes": "6313"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from login.management.commands import demo_data_login
class TestCommand(TestCase):
    """Smoke tests for the ``demo_data_login`` management command."""
    def test_command_demo_data_login(self):
        # Instantiating and running the command should not raise.
        demo_data_login.Command().handle()
| {
"content_hash": "6feb1b994ee12ae5ab23ed35ffa1d4d0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 53,
"avg_line_length": 23.3,
"alnum_prop": 0.7253218884120172,
"repo_name": "pkimber/login",
"id": "ab8e5bd48f3db4f59a222086a0bcc3824cab42ba",
"size": "259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "login/tests/test_management_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "13074"
},
{
"name": "Python",
"bytes": "45143"
},
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
} |
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class ComparisonOperationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The operator that is used to compare the metric data and the threshold.

    Member lookup is case-insensitive (``CaseInsensitiveEnumMeta``).
    """
    EQUALS = "Equals"
    NOT_EQUALS = "NotEquals"
    GREATER_THAN = "GreaterThan"
    GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
    LESS_THAN = "LessThan"
    LESS_THAN_OR_EQUAL = "LessThanOrEqual"
class CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The type of identity that created the resource.

    Member lookup is case-insensitive (``CaseInsensitiveEnumMeta``).
    """
    USER = "User"
    APPLICATION = "Application"
    MANAGED_IDENTITY = "ManagedIdentity"
    KEY = "Key"
class MetricStatisticType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The metric statistic type: how the metrics from multiple instances are combined."""
    AVERAGE = "Average"
    MIN = "Min"
    MAX = "Max"
    SUM = "Sum"
    COUNT = "Count"
class PredictiveAutoscalePolicyScaleMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The predictive autoscale mode."""
    DISABLED = "Disabled"
    FORECAST_ONLY = "ForecastOnly"
    ENABLED = "Enabled"
class RecurrenceFrequency(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The recurrence frequency: how often the schedule profile should take effect.

    This value must be Week, meaning each week will have the same set of
    profiles. For example, to set a daily schedule, set **schedule** to every
    day of the week. The frequency property specifies that the schedule is
    repeated weekly.
    """
    NONE = "None"
    SECOND = "Second"
    MINUTE = "Minute"
    HOUR = "Hour"
    DAY = "Day"
    WEEK = "Week"
    MONTH = "Month"
    YEAR = "Year"
class ScaleDirection(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The scale direction: whether the scaling action increases or decreases the number of instances."""
    NONE = "None"
    INCREASE = "Increase"
    DECREASE = "Decrease"
class ScaleRuleMetricDimensionOperationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The dimension operator. Only 'Equals' and 'NotEquals' are supported.

    'Equals' means being equal to any of the values; 'NotEquals' means being
    not equal to all of the values.
    """
    EQUALS = "Equals"
    NOT_EQUALS = "NotEquals"
class ScaleType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """The type of action that should occur when the scale rule fires."""
    CHANGE_COUNT = "ChangeCount"
    PERCENT_CHANGE_COUNT = "PercentChangeCount"
    EXACT_COUNT = "ExactCount"
    SERVICE_ALLOWED_NEXT_VALUE = "ServiceAllowedNextValue"
class TimeAggregationType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
    """Time aggregation type: how the data that is collected should be combined over time.

    The default value is Average.
    """
    AVERAGE = "Average"
    MINIMUM = "Minimum"
    MAXIMUM = "Maximum"
    TOTAL = "Total"
    COUNT = "Count"
    LAST = "Last"
| {
"content_hash": "a91d6efaa20d115f1ccee2cdf098a1d7",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 105,
"avg_line_length": 30.53125,
"alnum_prop": 0.7018082565677243,
"repo_name": "Azure/azure-sdk-for-python",
"id": "57efeee7708931786d8218809f47118b5f8e600b",
"size": "3399",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2022_10_01/models/_monitor_management_client_enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from tests import settings
# Python 3 moved ``urlencode`` into ``urllib.parse``; fall back to the
# top-level ``urllib`` module on Python 2.
try:
    import urllib.parse as urlrequest
except ImportError:
    import urllib as urlrequest
from .resources import Card, CardRegistration
from .test_base import BaseTest
import requests
import responses
class CardsTest(BaseTest):
    """Integration-style tests for card registration against the sandbox API
    (HTTP calls to the MangoPay API are stubbed with ``responses``; the
    tokenization POST goes through ``requests``)."""
    @responses.activate
    def test_cards_registration(self):
        """
        Card registration process:
        - Create a CardRegistration object
        - Receive a CardRegistration object
        - Send card details to the Tokenization server
        - Receive RegistrationData
        - Edit the CardRegistration with received RegistrationData
        """
        self.mock_natural_user()
        self.mock_card()
        self.mock_tokenization_request()
        # Stub the user's card-list endpoint so the final assertions can
        # retrieve exactly one card.
        self.register_mock({
            'method': responses.GET,
            'url': settings.MANGOPAY_API_SANDBOX_URL+settings.MANGOPAY_CLIENT_ID+'/users/1169419/cards',
            'body': [
                {
                    "ExpirationDate": "1214",
                    "Alias": "497010XXXXXX4406",
                    "CardType": "CB",
                    "Country": "",
                    "Product": "",
                    "BankCode": "",
                    "Active": True,
                    "Currency": "XXX",
                    "Validity": "VALID",
                    "UserId": "1167495",
                    "Id": "1167507",
                    "Tag": None,
                    "CreationDate": 1382608428
                }
            ],
            'status': 200
        })
        # Create a CardRegistration object
        card_params = {
            "user": self.natural_user,
            "currency": 'EUR'
        }
        card_registration = CardRegistration(**card_params)
        card_registration.save()
        for key, value in card_params.items():
            self.assertEqual(getattr(card_registration, key), value)
        self.assertIsNotNone(card_registration.get_pk())
        # Send card details to the Tokenization server
        # (test card number from the MangoPay sandbox).
        response = requests.post(card_registration.card_registration_url, urlrequest.urlencode({
            'cardNumber': '4970100000000154',
            'cardCvx': '123',
            'cardExpirationDate': '0120',
            'accessKeyRef': card_registration.access_key,
            'data': card_registration.preregistration_data
        }))
        # Edit the CardRegistration with received RegistrationData
        previous_pk = card_registration.get_pk()
        card_registration.registration_data = response.text
        card_registration.save()
        # Saving must update in place, not create a new object.
        self.assertEqual(previous_pk, card_registration.get_pk())
        self.assertIsNotNone(card_registration.registration_data)
        self.assertEqual(card_registration.registration_data, response.text)
        self.assertEqual(card_registration.status, 'VALIDATED')
        self.assertEqual(card_registration.result_message, 'Success')
        self.assertEqual(card_registration.result_code, '000000')
        self.assertIsNotNone(card_registration.card_id) # We now have a card id!
        # Fetch the new card thanks to card_id
        self.assertIsNotNone(card_registration.card_id)
        card = Card.get(card_registration.card_id)
        self.assertIsNotNone(card.get_pk())
        self.assertEqual(card.get_pk(), card_registration.card_id)
        # Retrieve user's cards
        self.assertEqual(len(self.natural_user.cards.all()), 1)
        self.assertEqual(self.natural_user.cards.all()[0], card)
        self.assertEqual(self.natural_user.cards.get(card.id), card)
    def test_desactive_card(self):
        # TODO: implement the card deactivation test (method name typo
        # 'desactive' kept — renaming would change test discovery history).
        # self.card.active = False
        # self.card.save()
        pass
| {
"content_hash": "6f89cfc19a6865f143bb2cfc869b3576",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 104,
"avg_line_length": 35.11538461538461,
"alnum_prop": 0.5930996714129244,
"repo_name": "chocopoche/mangopay2-python-sdk",
"id": "e2afb7cf028f7010e4c0fe64abebcd9cb4e1d2a6",
"size": "3676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cards.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "178"
},
{
"name": "Python",
"bytes": "330033"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ZautoValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``zauto`` property of ``densitymapbox``."""
    def __init__(self, plotly_name="zauto", parent_name="densitymapbox", **kwargs):
        # Pull the defaults out of kwargs first so explicit overrides win.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {})
        super(ZautoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
| {
"content_hash": "d1f62a8a5488b07f8034ab08f452ff67",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 83,
"avg_line_length": 38.083333333333336,
"alnum_prop": 0.6148796498905909,
"repo_name": "plotly/plotly.py",
"id": "e095394f995fddb75170764f74caa4010ce6eff1",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/densitymapbox/_zauto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import json
import sys
# Column headings for the internal-node section of the TSV report.
headers = ['id', 'supported by', 'compatible with', 'conflicts with', 'is trivial in']
# Annotation-blob keys, in the same order as headers[1:] (the loop below
# writes column 1 + n for int_keys[n]).
int_keys = [u'supported_by', u'partial_path_of', u'conflicts_with', u'terminal']
# Accumulators for the two report sections.
internal_rows = []
terminal_rows = []
def url_for(study, tree, node):
    """Return a human-readable pointer to *node* of *tree* in *study* on the
    Open Tree curator app."""
    template = ('node %(n)s for https://tree.opentreeoflife.org/curator/'
                'study/view/%(s)s/?tab=trees&tree=%(t)s')
    return template % {'s': study, 't': tree, 'n': node}
# Require the annotations file path as the single CLI argument.
try:
    annotations_fn = sys.argv[1]
except IndexError:
    # Catch only the missing-argument case; the original bare ``except:``
    # would also have swallowed SystemExit and KeyboardInterrupt.
    sys.exit('''Pass in the annotated_supertree/annotations.json file from the propinquity-based synthesis.
This script will create a tab separated view of the annotations file.
''')
# Load the annotations JSON ('rU' = universal-newline text mode, Python 2 era).
with open(annotations_fn, 'rU') as inp:
    d = json.load(inp)
# Maps "study@tree" keys to {'study_id': ..., 'tree_id': ...} blobs.
source_id_map = d['source_id_map']
#print source_id_map
n = d['nodes']
vs = set()
for node_id, supp_conf in n.items():
    # Internal node: anything that is not purely a 'terminal' record.
    if ('terminal' not in supp_conf) or (len(supp_conf) > 1):
        # NOTE(review): this ``n`` rebinds the outer ``n = d['nodes']``;
        # harmless here because the .items() iterator was already created,
        # but worth renaming.
        for n, ik in enumerate(int_keys):
            for v in supp_conf.get(ik, []):
                # One output row per supporting/conflicting tree; the URL is
                # placed in the column matching this annotation kind.
                nr = [node_id, '', '', '', '']
                study_tree_key = v[0]
                blob = source_id_map[study_tree_key]
                study_id, tree_id = blob['study_id'], blob['tree_id']
                nr[1 + n] = url_for(study_id, tree_id, v[1])
                internal_rows.append(nr)
    else:
        # Terminal node: two-column rows (id, included-in URL).
        for v in supp_conf['terminal']:
            nr = [node_id, '']
            study_tree_key = v[0]
            blob = source_id_map[study_tree_key]
            study_id, tree_id = blob['study_id'], blob['tree_id']
            nr[1] = url_for(study_id, tree_id, v[1])
            terminal_rows.append(nr)
internal_rows.sort()
# Emit the internal-node section, then the terminal section, as TSV.
out = sys.stdout
out.write('{}\n'.format('\t'.join(headers)))
for row in internal_rows:
    out.write('{}\n'.format('\t'.join(row)))
out.write('{}\n'.format('\t'.join(['Terminal node id', 'included in'])))
for row in terminal_rows:
    out.write('{}\n'.format('\t'.join(row)))
"content_hash": "ed3b8087dc988d41df723a0fc9a5d627",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 107,
"avg_line_length": 39.7,
"alnum_prop": 0.5596977329974812,
"repo_name": "OpenTreeOfLife/propinquity",
"id": "9bd3ffb99c3ff22033c532e577834af26b43128d",
"size": "2007",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/annotations-to-tsv.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "1916"
},
{
"name": "Makefile",
"bytes": "5843"
},
{
"name": "Python",
"bytes": "112049"
},
{
"name": "Shell",
"bytes": "24857"
}
],
"symlink_target": ""
} |
from distutils.core import setup
# Distribution metadata for the django-rest-framework-mongoengine package.
_REPO_URL = 'https://github.com/umutbozkurt/django-rest-framework-mongoengine'

_CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'License :: OSI Approved :: MIT License',
    'Natural Language :: English',
    'Programming Language :: Python',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'Topic :: Software Development :: Testing',
    'Topic :: Internet',
    'Topic :: Internet :: WWW/HTTP :: Site Management',
    'Topic :: Text Processing :: Markup :: HTML',
    'Intended Audience :: Developers',
]

setup(
    name='django-rest-framework-mongoengine',
    version='2.0.2',
    description='MongoEngine support for Django Rest Framework.',
    packages=['rest_framework_mongoengine'],
    license='see ' + _REPO_URL + '/blob/master/LICENSE',
    long_description='see ' + _REPO_URL + '/blob/master/README.md',
    url=_REPO_URL,
    download_url=_REPO_URL + '/releases/',
    keywords=['mongoengine', 'serializer', 'django rest framework'],
    author='Umut Bozkurt',
    author_email='umutbozkurt92@gmail.com',
    requires=['mongoengine', 'djangorestframework'],
    classifiers=_CLASSIFIERS,
)
| {
"content_hash": "e2d8b94b021489841f5335de942eedb0",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 114,
"avg_line_length": 52.22222222222222,
"alnum_prop": 0.6375886524822695,
"repo_name": "BryanAke/django-rest-framework-mongoengine",
"id": "494881c253085fc265f4d1ec52615505ca1ad86f",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88311"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals, absolute_import
from builtins import dict, str
import logging
import itertools
import rdflib.namespace
from copy import deepcopy
from indra.preassembler.hierarchy_manager import HierarchyManager, \
UnknownNamespaceException, hierarchies as default_hierarchies
from indra.databases import hgnc_client
from indra.statements import Agent, Complex, Evidence
logger = logging.getLogger(__name__)
class Expander(object):
    """Expands family/complex-level Agents in Statements into their members.

    Parameters
    ----------
    hierarchies : dict, optional
        Mapping with an 'entity' key pointing to a HierarchyManager.
        If not given, the default INDRA hierarchies are used.
    """
    def __init__(self, hierarchies=None):
        if hierarchies is None:
            self.entities = default_hierarchies['entity']
        else:
            self.entities = hierarchies['entity']
    def expand_families(self, stmts):
        """Generate statements by expanding members of families and complexes.

        For each Statement, every Agent that has children in the entity
        hierarchy is replaced by each of its children in turn; one new
        Statement is produced per element of the cross product of all
        the expanded member lists.
        """
        new_stmts = []
        for stmt in stmts:
            # Put together the lists of families, with their members. E.g.,
            # for a statement involving RAF and MEK, should return a list of
            # tuples like [(BRAF, RAF1, ARAF), (MAP2K1, MAP2K2)]
            families_list = []
            for ag in stmt.agent_list():
                ag_children = self.get_children(ag)
                # If the agent has no children, then we use the agent itself
                if len(ag_children) == 0:
                    families_list.append([ag])
                # Otherwise, we add the tuple of namespaces/IDs for the children
                else:
                    families_list.append(ag_children)
            # Now, put together new statements from the cross product of the
            # expanded family members
            for ag_combo in itertools.product(*families_list):
                # Create new agents based on the namespaces/IDs, with
                # appropriate name and db_refs entries
                child_agents = []
                for ag_entry in ag_combo:
                    # If we got an agent, or None, that means there were no
                    # children; so we use the original agent rather than
                    # construct a new agent
                    if ag_entry is None or isinstance(ag_entry, Agent):
                        new_agent = ag_entry
                    # Otherwise, create a new agent from the ns/ID
                    elif isinstance(ag_entry, tuple):
                        # FIXME FIXME FIXME
                        # This doesn't reproduce agent state from the original
                        # family-level statements!
                        ag_ns, ag_id = ag_entry
                        new_agent = _agent_from_ns_id(ag_ns, ag_id)
                    else:
                        raise Exception('Unrecognized agent entry type.')
                    # Add agent to our list of child agents
                    child_agents.append(new_agent)
                # Create a copy of the statement
                new_stmt = deepcopy(stmt)
                # Replace the agents in the statement with the newly-created
                # child agents
                new_stmt.set_agent_list(child_agents)
                # Add to list
                new_stmts.append(new_stmt)
        return new_stmts
    def get_children(self, agent, ns_filter='HGNC'):
        """Return (namespace, id) tuples for the children of an Agent.

        Children are looked up in the entity hierarchy from the Agent's
        grounding; an ungrounded Agent (or one with no children) yields
        an empty list. When ns_filter is not None, only children in that
        namespace (default 'HGNC') are returned.
        """
        if agent is None:
            return []
        # Get the grounding for the agent
        (ns, id) = agent.get_grounding()
        # If there is no grounding for this agent, then return no children
        # (empty list)
        if ns is None or id is None:
            return []
        # Get URI for agent
        ag_uri = self.entities.get_uri(ns, id)
        # Look up the children for this family
        children_uris = self.entities.get_children(ag_uri)
        if not children_uris:
            return []
        # Parse children URI list into namespaces and ID
        children_parsed = []
        for child_uri in children_uris:
            child_ns, child_id = self.entities.ns_id_from_uri(child_uri)
            # If ns_filter is None, add in all children
            if ns_filter is None:
                children_parsed.append((child_ns, child_id))
            # Otherwise, only add children with a matching namespace
            elif child_ns == ns_filter:
                children_parsed.append((child_ns, child_id))
        return children_parsed
    def complexes_from_hierarchy(self):
        """Return a Complex Statement for every complex in the hierarchy.

        Each Complex gets an Evidence with source_api='famplex' and the
        complex URI as its source_id.
        """
        # Iterate over the partof_closure to determine all of the complexes
        # and all of their members
        all_complexes = {}
        for subunit, complexes in self.entities.partof_closure.items():
            for complex in complexes:
                complex_subunits = all_complexes.get(complex, [])
                complex_subunits.append(subunit)
                all_complexes[complex] = complex_subunits
        # Now iterate over all of the complexes and create Complex statements
        complex_stmts = []
        for complex, subunits in all_complexes.items():
            # Create an Evidence object for the statement with the URI of the
            # complex as the source_id
            ev = Evidence(source_api='famplex', source_id=complex)
            subunit_agents = [_agent_from_uri(su) for su in subunits]
            complex_stmt = Complex(subunit_agents, evidence=[ev])
            complex_stmts.append(complex_stmt)
        return complex_stmts
    def expanded_complexes_from_hierarchy(self):
        """Return hierarchy-derived Complexes with families expanded."""
        complex_stmts = self.complexes_from_hierarchy()
        expanded_complexes = self.expand_families(complex_stmts)
        return expanded_complexes
def _agent_from_uri(uri):
    """Build a grounded Agent from a hierarchy entry URI.

    The URI is split into its (namespace, id) pair, which is then
    grounded the same way as `_agent_from_ns_id`.
    """
    ns, entry_id = HierarchyManager.ns_id_from_uri(uri)
    return _agent_from_ns_id(ns, entry_id)
def _agent_from_ns_id(ag_ns, ag_id):
    """Build an Agent grounded in the given namespace/ID pair.

    The ID doubles as the Agent's name and its 'TEXT' db_refs entry.
    HGNC symbols are mapped to HGNC IDs (and onward to UniProt when
    possible); any other namespace is recorded verbatim.
    """
    refs = {'TEXT': ag_id}
    if ag_ns == 'HGNC':
        hgnc_id = hgnc_client.get_hgnc_id(ag_id)
        if hgnc_id is not None:
            refs['HGNC'] = hgnc_id
            up_id = hgnc_client.get_uniprot_id(hgnc_id)
            if up_id is not None:
                refs['UP'] = up_id
    elif ag_id is not None:
        refs[ag_ns] = ag_id
    return Agent(ag_id, db_refs=refs)
| {
"content_hash": "a213aaf79321d7f0cc281520c0480666",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 80,
"avg_line_length": 43.05594405594405,
"alnum_prop": 0.5783660873802177,
"repo_name": "pvtodorov/indra",
"id": "25ad6da51c078337d8c4b577b3f5834ce7ea91b3",
"size": "6157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indra/tools/expand_families.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "169"
},
{
"name": "HTML",
"bytes": "17236"
},
{
"name": "JavaScript",
"bytes": "72960"
},
{
"name": "Python",
"bytes": "2660313"
},
{
"name": "Shell",
"bytes": "381"
}
],
"symlink_target": ""
} |
from openturns import *
from math import *
# Set up the OpenTURNS test environment and fix the RNG seed so the test
# output is reproducible across runs.
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
    # Nataf transformation for a 2-dimensional independent copula.
    dim = 2
    transformation = NatafIndependentCopulaEvaluation(dim)
    print "transformation=", repr(transformation)
    # Evaluate the transformation and its parameters gradient at a point.
    point = NumericalPoint(dim, 0.75)
    print "transformation(", point, ")=", repr(transformation(point))
    print "transformation parameters gradient=", repr(transformation.parametersGradient(point))
    print "input dimension=", transformation.getInputDimension()
    print "output dimension=", transformation.getOutputDimension()
except:
    # NOTE(review): Python 2 idiom; sys.exc_type/sys.exc_value are
    # long-deprecated globals, kept for consistency with this test suite.
    import sys
    print "t_NatafIndependentCopulaEvaluation_std.py", sys.exc_type, sys.exc_value
| {
"content_hash": "d0a7ec51a5b58997d9da0bd295cb0c53",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 95,
"avg_line_length": 32.85,
"alnum_prop": 0.7488584474885844,
"repo_name": "sofianehaddad/ot-svn",
"id": "fb29a2645a629cf9ce5972947549bd1a5605e87f",
"size": "681",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/test/t_NatafIndependentCopulaEvaluation_std.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6498"
},
{
"name": "C",
"bytes": "455749"
},
{
"name": "C++",
"bytes": "10021345"
},
{
"name": "CMake",
"bytes": "240050"
},
{
"name": "FORTRAN",
"bytes": "299"
},
{
"name": "Makefile",
"bytes": "12372"
},
{
"name": "NSIS",
"bytes": "26263"
},
{
"name": "Python",
"bytes": "1221927"
},
{
"name": "R",
"bytes": "11141"
},
{
"name": "Scilab",
"bytes": "2612"
},
{
"name": "Shell",
"bytes": "20403"
},
{
"name": "TeX",
"bytes": "4250"
},
{
"name": "Visual Basic",
"bytes": "3294"
}
],
"symlink_target": ""
} |
import os
import time
import click
import logging
import subprocess
import selenium.webdriver
from urllib.request import pathname2url
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from . import run_command, absjoin
# Command used by the watcher to regenerate the Sphinx HTML output.
MAKE_COMMAND = ['make', 'html']
class SphinxEventHandler(PatternMatchingEventHandler):
    """Rebuild and refresh on every change event.

    On any watched filesystem event the Sphinx HTML docs are rebuilt via
    MAKE_COMMAND and the Chrome window opened at construction time is
    refreshed to show the result.
    """
    def __init__(
        self, patterns=None, ignore_patterns=None,
        ignore_directories=False, case_sensitive=False
    ):
        super(SphinxEventHandler, self).__init__(
            patterns, ignore_patterns, ignore_directories, case_sensitive)
        # Relative path: assumes the CWD is the docs directory
        # (serve_docs chdirs there before constructing this handler).
        path = os.path.join('_build', 'html', 'index.html')
        # NOTE(review): pathname2url already yields a leading '/' on
        # POSIX, so this produces 'file:////...' (four slashes). Browsers
        # generally tolerate it, but 'file://' + ... may have been the
        # intended form -- confirm.
        url = 'file:///' + pathname2url(os.path.abspath(path))
        self.driver = selenium.webdriver.Chrome()
        self.driver.get(url)
    def cleanup(self):
        # Close the browser window and end the WebDriver session.
        self.driver.quit()
    def _run(self):
        # Rebuild the docs, then reload the previewed page.
        print('creating HTML docs')
        subprocess.call(MAKE_COMMAND)
        self.driver.refresh()
        logging.info("Rebuild and refreshed")
    def on_moved(self, event):
        super(SphinxEventHandler, self).on_moved(event)
        self._run()
        what = 'directory' if event.is_directory else 'file'
        logging.info("Moved %s: from %s to %s", what, event.src_path,
                     event.dest_path)
    def on_created(self, event):
        super(SphinxEventHandler, self).on_created(event)
        self._run()
        what = 'directory' if event.is_directory else 'file'
        logging.info("Created %s: %s", what, event.src_path)
    def on_deleted(self, event):
        super(SphinxEventHandler, self).on_deleted(event)
        self._run()
        what = 'directory' if event.is_directory else 'file'
        logging.info("Deleted %s: %s", what, event.src_path)
    def on_modified(self, event):
        super(SphinxEventHandler, self).on_modified(event)
        self._run()
        what = 'directory' if event.is_directory else 'file'
        logging.info("Modified %s: %s", what, event.src_path)
def clean_docs():
    '''Clean the HTML documentation'''
    # Remove the generated build output and the auto-generated API stubs.
    for cmd in (['rm', '-rf', 'docs/_build/*'],
                ['rm', 'docs/yamicache.rst'],
                ['rm', 'docs/modules.rst']):
        run_command(cmd)
def build_docs():
    '''Build HTML documentation'''
    clean_docs()
    # Echo the captured build output so the caller sees Sphinx's report.
    output = run_command(['make', 'html'], cwd='docs')[0]
    click.echo(output)
def serve_docs():
    '''Serve the docs and watch for changes'''
    this_dir = os.path.abspath(os.path.dirname(__file__))
    docs_path = absjoin(this_dir, '..', 'docs')
    os.chdir(docs_path)
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    handler = SphinxEventHandler(
        patterns=['*.rst', '*.py'], ignore_patterns=['*.html'],
        ignore_directories=["_build"])
    observer = Observer()
    # Watch the docs tree, the project root (non-recursive), and the
    # package sources for anything that should trigger a rebuild.
    for watch_path, recurse in (('.', True),
                                ('..', False),
                                ('../yamicache', True)):
        observer.schedule(handler, path=watch_path, recursive=recurse)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        handler.cleanup()
    observer.join()
| {
"content_hash": "0f264efa58c871c275164f895b312fa1",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 74,
"avg_line_length": 28.939655172413794,
"alnum_prop": 0.6148346738159071,
"repo_name": "mtik00/yamicache",
"id": "48489a41441d7843c9b10779311b0e4f6fcbe0d7",
"size": "3400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__manage/docs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46773"
}
],
"symlink_target": ""
} |
"""Like pprint, but with types except for dictionary keys."""
__all__ = ["pprint", "pformat"]
__author__ = "Carl Bordum Hansen"
__license__ = "MIT"
import pprint as _pprint
import contextlib
def _new_safe_repr(object, context, maxlevels, level):
"""Return object type name except for dict keys.
Like `pprint._safe_repr`, but returns type name of object instead
of object repr except for dictionary keys. Also formats lists and
tuples nicely.
Used to overwrite `pprint._safe_repr` with the context manager
`change_pprint_repr`.
"""
typerepr = lambda object: type(object).__name__
type_ = type(object)
if type_ in _pprint._builtin_scalars:
return typerepr(object), True, False
r = getattr(type_, "__repr__", None)
if issubclass(type_, dict) and r is dict.__repr__:
if not object:
return "dict", True, False
context[id(object)] = 1
readable = True
recursive = False
level += 1
pairs = []
for k, v in object.items():
vrepr, vreadable, recur = _new_safe_repr(v, context, maxlevels, level)
pairs.append("%s: %s" % (repr(k), vrepr))
readable = readable and vreadable
if recur:
recursive = True
del context[id(object)]
return "{%s}" % ", ".join(pairs), readable, recursive
if issubclass(type_, (list, tuple)):
if issubclass(type_, list):
if not object:
return "list", True, False
format = "[%s]"
else: # its a tuple
if not object:
return "tuple", True, False
format = "(%s)" if len(object) != 1 else "(%s,)"
context[id(object)] = 1
readable = True
recursive = False
items = []
level += 1
for item in object:
irepr, ireadable, irecur = _new_safe_repr(item, context, maxlevels, level)
items.append(irepr)
if not ireadable:
readable = False
if irecur:
recursion = True
del context[id(object)]
if len(set(items)) == 1:
items = [items[0]]
return format % ", ".join(items), readable, recursive
return typerepr(object), True, False
class DatatypingPrettyPrinter(_pprint.PrettyPrinter):
    """PrettyPrinter that renders values as type names via _new_safe_repr.

    NOTE: overrides internal pprint hooks, so it depends on private
    `pprint.PrettyPrinter` details (`_format`, `_indent_per_level`).
    """
    def format(self, object, context, maxlevels, level):
        """
        Override format to call _new_safe_repr
        """
        return _new_safe_repr(object, context, maxlevels, level)
    def _format_dict_items(self, items, stream, indent, allowance, context, level):
        # Render dict bodies one "key: value," pair per line, with the
        # closing brace dedented back to the opening indentation level.
        write = stream.write
        indent += self._indent_per_level
        last_index = len(items) - 1
        write("\n")
        write(" " * indent)
        for i, (key, ent) in enumerate(items):
            last = i == last_index
            rep = repr(key)
            write(rep)
            write(": ")
            # Shrink allowance to 1 for all but the last pair, matching
            # the base class's wrapping behavior.
            self._format(ent, stream, indent, allowance if last else 1, context, level)
            write(",\n")
            if last:
                write((" " * indent)[: -self._indent_per_level])
            else:
                write(" " * indent)
def pprint(object, stream=None, indent=4, width=80, depth=None, compact=False):
    """Pretty-print the data structure to *stream* (default: stdout)."""
    printer = DatatypingPrettyPrinter(stream=stream, indent=indent,
                                      width=width, depth=depth,
                                      compact=compact)
    printer.pprint(object)
def pformat(object, indent=4, width=80, depth=None, compact=False):
    """Return the pretty printed data structure of *object* as a string."""
    printer = DatatypingPrettyPrinter(indent=indent, width=width,
                                      depth=depth, compact=compact)
    return printer.pformat(object)
| {
"content_hash": "e391732f5bc3803c786ae9097c9e3939",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 87,
"avg_line_length": 32.582608695652176,
"alnum_prop": 0.562583400053376,
"repo_name": "Zaab1t/datatyping",
"id": "cdd92478cc84dc412db75badfca1d7ca0cfa26f1",
"size": "3747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datatyping/printer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16359"
}
],
"symlink_target": ""
} |
from neutron_lib import context as n_ctx
from oslo_log import log as logging
from neutron.api.rpc.callbacks.consumer import registry as registry_rpc
from neutron.api.rpc.callbacks import events as events_rpc
from neutron.api.rpc.handlers import resources_rpc
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.common import rpc as n_rpc
from neutron import objects
LOG = logging.getLogger(__name__)
objects.register_objects()
class RemoteResourceCache(object):
    """Retrieves and stashes logical resources in their OVO format.
    This is currently only compatible with OVO objects that have an ID.
    """
    def __init__(self, resource_types):
        # Per-type caches: known objects keyed by ID, and the set of IDs
        # known to be deleted (deletions are final; see
        # record_resource_delete).
        self.resource_types = resource_types
        self._cache_by_type_and_id = {rt: {} for rt in self.resource_types}
        self._deleted_ids_by_type = {rt: set() for rt in self.resource_types}
        # track everything we've asked the server so we don't ask again
        self._satisfied_server_queries = set()
        self._puller = resources_rpc.ResourcesPullRpcApi()
    def _type_cache(self, rtype):
        """Return the ID->object cache for rtype; raise if untracked."""
        if rtype not in self.resource_types:
            raise RuntimeError("Resource cache not tracking %s" % rtype)
        return self._cache_by_type_and_id[rtype]
    def start_watcher(self):
        """Start listening for RPC pushes that keep this cache current."""
        self._watcher = RemoteResourceWatcher(self)
    def get_resource_by_id(self, rtype, obj_id):
        """Returns None if it doesn't exist."""
        if obj_id in self._deleted_ids_by_type[rtype]:
            return None
        cached_item = self._type_cache(rtype).get(obj_id)
        if cached_item:
            return cached_item
        # try server in case object existed before agent start
        self._flood_cache_for_query(rtype, id=(obj_id, ))
        return self._type_cache(rtype).get(obj_id)
    def _flood_cache_for_query(self, rtype, **filter_kwargs):
        """Load info from server for first query.
        Queries the server if this is the first time a given query for
        rtype has been issued.
        """
        query_ids = self._get_query_ids(rtype, filter_kwargs)
        if query_ids.issubset(self._satisfied_server_queries):
            # we've already asked the server this question so we don't
            # ask directly again because any updates will have been
            # pushed to us
            return
        context = n_ctx.get_admin_context()
        resources = self._puller.bulk_pull(context, rtype,
                                           filter_kwargs=filter_kwargs)
        for resource in resources:
            if self._is_stale(rtype, resource):
                # if the server was slow enough to respond the object may have
                # been updated already and pushed to us in another thread.
                LOG.debug("Ignoring stale update for %s: %s", rtype, resource)
                continue
            self._type_cache(rtype)[resource.id] = resource
        LOG.debug("%s resources returned for queries %s", len(resources),
                  query_ids)
        self._satisfied_server_queries.update(query_ids)
    def _get_query_ids(self, rtype, filters):
        """Turns filters for a given rtype into a set of query IDs.
        This can result in multiple queries due to the nature of the query
        processing on the server side. Since multiple values are treated as
        an OR condition, a query for {'id': ('1', '2')} is equivalent
        to a query for {'id': ('1',)} and {'id': ('2')}. This method splits
        the former into the latter to ensure we aren't asking the server
        something we already know.
        """
        query_ids = set()
        for k, values in tuple(sorted(filters.items())):
            if len(values) > 1:
                # Recurse once per value of the first multi-value filter;
                # the recursion bottoms out when every filter is
                # single-valued.
                for v in values:
                    new_filters = filters.copy()
                    new_filters[k] = (v, )
                    query_ids.update(self._get_query_ids(rtype, new_filters))
                break
            else:
                # no multiple value filters left so add an ID
                query_ids.add((rtype, ) + tuple(sorted(filters.items())))
        return query_ids
    def get_resources(self, rtype, filters):
        """Find resources that match key:values in filters dict.
        If the attribute on the object is a list, each value is checked if it
        is in the list.
        The values in the dictionary for a single key are matched in an OR
        fashion.
        """
        self._flood_cache_for_query(rtype, **filters)
        def match(obj):
            for key, values in filters.items():
                for value in values:
                    attr = getattr(obj, key)
                    if isinstance(attr, (list, tuple, set)):
                        # attribute is a list so we check if value is in
                        # list
                        if value in attr:
                            break
                    elif value == attr:
                        break
                else:
                    # no match found for this key
                    return False
            return True
        return self.match_resources_with_func(rtype, match)
    def match_resources_with_func(self, rtype, matcher):
        """Returns a list of all resources satisfying func matcher."""
        # TODO(kevinbenton): this is O(N), offer better lookup functions
        return [r for r in self._type_cache(rtype).values()
                if matcher(r)]
    def _is_stale(self, rtype, resource):
        """Determines if a given resource update is safe to ignore.
        It can be safe to ignore if it has already been deleted or if
        we have a copy with a higher revision number.
        """
        if resource.id in self._deleted_ids_by_type[rtype]:
            return True
        existing = self._type_cache(rtype).get(resource.id)
        if existing and existing.revision_number > resource.revision_number:
            # NOTE(kevinbenton): we could be strict and check for >=, but this
            # makes us more tolerant of bugs on the server where we forget to
            # bump the revision_number.
            return True
        return False
    def record_resource_update(self, context, rtype, resource):
        """Takes in an OVO and generates an event on relevant changes.
        A change is deemed to be relevant if it is not stale and if any
        fields changed beyond the revision number and update time.
        Both creates and updates are handled in this function.
        """
        if self._is_stale(rtype, resource):
            LOG.debug("Ignoring stale update for %s: %s", rtype, resource)
            return
        existing = self._type_cache(rtype).get(resource.id)
        self._type_cache(rtype)[resource.id] = resource
        changed_fields = self._get_changed_fields(existing, resource)
        if not changed_fields:
            LOG.debug("Received resource %s update without any changes: %s",
                      rtype, resource.id)
            return
        if existing:
            LOG.debug("Resource %s %s updated (revision_number %s->%s). "
                      "Old fields: %s New fields: %s",
                      rtype, existing.id, existing.revision_number,
                      resource.revision_number,
                      {f: existing.get(f) for f in changed_fields},
                      {f: resource.get(f) for f in changed_fields})
        else:
            LOG.debug("Received new resource %s: %s", rtype, resource)
        # local notification for agent internals to subscribe to
        registry.notify(rtype, events.AFTER_UPDATE, self,
                        context=context, changed_fields=changed_fields,
                        existing=existing, updated=resource,
                        resource_id=resource.id)
    def record_resource_delete(self, context, rtype, resource_id):
        # deletions are final, record them so we never
        # accept new data for the same ID.
        LOG.debug("Resource %s deleted: %s", rtype, resource_id)
        # TODO(kevinbenton): we need a way to expire items from the set at
        # some TTL so it doesn't grow indefinitely with churn
        if resource_id in self._deleted_ids_by_type[rtype]:
            LOG.debug("Skipped duplicate delete event for %s", resource_id)
            return
        self._deleted_ids_by_type[rtype].add(resource_id)
        existing = self._type_cache(rtype).pop(resource_id, None)
        # local notification for agent internals to subscribe to
        registry.notify(rtype, events.AFTER_DELETE, self, context=context,
                        existing=existing, resource_id=resource_id)
    def _get_changed_fields(self, old, new):
        """Returns changed fields excluding update time and revision."""
        new = new.to_dict()
        changed = set(new)
        if old:
            for k, v in old.to_dict().items():
                if v == new.get(k):
                    changed.discard(k)
        for ignore in ('revision_number', 'updated_at'):
            changed.discard(ignore)
        return changed
class RemoteResourceWatcher(object):
    """Converts RPC callback notifications to local registry notifications.

    Subscribes to the versioned resource topic of every resource type the
    given cache tracks, and forwards each pushed create/update/delete into
    the RemoteResourceCache, which handles staleness detection and local
    registry notification.

    All watched resources must be primary keyed on a field called 'id' and
    have a standard attr revision number.
    """
    def __init__(self, remote_resource_cache):
        self.rcache = remote_resource_cache
        self._init_rpc_listeners()
    def _init_rpc_listeners(self):
        # One shared push endpoint; a fanout consumer per resource type.
        endpoints = [resources_rpc.ResourcesPushRpcCallback()]
        self._connection = n_rpc.create_connection()
        for resource_type in self.rcache.resource_types:
            registry_rpc.register(self.resource_change_handler, resource_type)
            topic = resources_rpc.resource_type_versioned_topic(resource_type)
            self._connection.create_consumer(topic, endpoints, fanout=True)
        self._connection.consume_in_threads()
    def resource_change_handler(self, context, rtype, resources, event_type):
        is_delete = event_type == events_rpc.DELETED
        for resource in resources:
            if is_delete:
                self.rcache.record_resource_delete(context, rtype,
                                                   resource.id)
            else:
                # creates and updates are treated equally
                self.rcache.record_resource_update(context, rtype, resource)
| {
"content_hash": "b6324d3bd093492c30a93241cbc73fbb",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 78,
"avg_line_length": 44.2125,
"alnum_prop": 0.6088021864103289,
"repo_name": "eayunstack/neutron",
"id": "37bd0f50e4d9b47737e1daed46a81690652df72d",
"size": "11209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/agent/resource_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "10593193"
},
{
"name": "Shell",
"bytes": "8804"
}
],
"symlink_target": ""
} |
"""
"""
import os
import vcr
# Shared VCR recorder: replays HTTP interactions from cassette files,
# recording them on first run ('once'); requests are matched on URI and
# HTTP method.
jiravcr = vcr.VCR(
    record_mode = 'once',
    match_on = ['uri', 'method'],
)
class BridgeTests:
    """Shared bridge test cases, driven by recorded VCR cassettes.

    Mixin: the concrete TestCase must provide ``self.bridge`` (the JIRA
    bridge under test) and ``self.vcr_directory`` (directory holding the
    cassette files used below).
    """

    def _cassette(self, name):
        """Return a jiravcr context manager for the named cassette file."""
        return jiravcr.use_cassette(os.path.join(self.vcr_directory, name))

    def test_get_issue(self):
        with self._cassette("issue.yaml"):
            self.assertIsNotNone(self.bridge.get_issue("TP-9"))

    def test_get_statuses(self):
        with self._cassette("status.yaml"):
            self.assertIsNotNone(self.bridge.get_statuses())

    def test_get_projects(self):
        with self._cassette("project.yaml"):
            self.assertIsNotNone(self.bridge.get_projects())

    def test_get_priorities(self):
        with self._cassette("priorities.yaml"):
            self.assertIsNotNone(self.bridge.get_priorities())

    def test_get_transitions(self):
        with self._cassette("transitions.yaml"):
            self.assertIsNotNone(self.bridge.get_available_transitions("TP-9"))

    def test_get_resolutions(self):
        with self._cassette("resolutions.yaml"):
            self.assertIsNotNone(self.bridge.get_resolutions())

    def test_get_project_components(self):
        with self._cassette("components.yaml"):
            self.assertIsNotNone(self.bridge.get_components("TP"))

    def test_get_issue_types(self):
        with self._cassette("types.yaml"):
            self.assertIsNotNone(self.bridge.get_issue_types())

    def test_get_sub_task_issue_types(self):
        # NOTE(review): this calls get_issue_types() just like the test
        # above (only the cassette differs) -- confirm whether a dedicated
        # sub-task-type API was intended here.
        with self._cassette("subtypes.yaml"):
            self.assertIsNotNone(self.bridge.get_issue_types())

    def test_search_free_text(self):
        with self._cassette("search.yaml"):
            self.assertTrue(
                len(self.bridge.search_issues("test jira-cli")) == 1)

    def test_search_jql(self):
        with self._cassette("search-jql.yaml"):
            self.assertTrue(
                len(self.bridge.search_issues_jql("summary~jira-cli")) == 1)

    def test_filter_fail(self):
        with self._cassette("filter-search-fail.yaml"):
            self.assertIsNotNone(
                self.bridge.get_issues_by_filter("test-filter"))

    def test_filter(self):
        # BUG FIX: this method was a second definition of test_filter_fail,
        # which silently shadowed the one above so it never ran. Renamed to
        # cover the success-path cassette.
        with self._cassette("filter-search.yaml"):
            self.assertIsNotNone(
                self.bridge.get_issues_by_filter("test filter", "blah"))

    def test_create_issue(self):
        with self._cassette("create.yaml"):
            self.assertIsNotNone(
                self.bridge.create_issue("TP", summary='test-create-issue'))

    def test_create_child_issue(self):
        with self._cassette("childcreate.yaml"):
            self.assertIsNotNone(
                self.bridge.create_issue("TP", type='sub-task',
                                         summary='test-create-issue',
                                         parent='TP-10'))
"content_hash": "07b2419c66de228861badf3bae3d4078",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 84,
"avg_line_length": 37.78,
"alnum_prop": 0.5897300158814187,
"repo_name": "rexyeah/jira-cli",
"id": "69281eacd965d61d0e90726808a57440a4e60c26",
"size": "3778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/common_bridge_cases.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120668"
},
{
"name": "Shell",
"bytes": "231"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.